Diffstat (limited to 'arch')
305 files changed, 9682 insertions, 6417 deletions
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 766fdfde2b7a..9b0d40093c9a 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -27,7 +27,7 @@ #define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* * Is a address valid? This does a straightforward calculation rather @@ -39,13 +39,13 @@ * - AND "addr+size" doesn't have any high-bits set * - OR we are in kernel mode. */ -#define __access_ok(addr,size,segment) \ +#define __access_ok(addr, size, segment) \ (((segment).seg & (addr | size | (addr+size))) == 0) -#define access_ok(type,addr,size) \ +#define access_ok(type, addr, size) \ ({ \ __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)),(size),get_fs()); \ + __access_ok(((unsigned long)(addr)), (size), get_fs()); \ }) /* @@ -60,20 +60,20 @@ * (a) re-use the arguments for side effects (sizeof/typeof is ok) * (b) require any knowledge of processes at this stage */ -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) /* * The "__xxx" versions do not do address space checking, useful when * doing multiple accesses to the same area (the programmer has to do the * checks by hand with "access_ok()") */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) /* * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to @@ -84,7 +84,7 @@ extern void __get_user_unknown(void); -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err = 0; \ unsigned long __gu_val; \ @@ -96,16 +96,16 @@ extern void __get_user_unknown(void); case 8: __get_user_64(ptr); break; \ default: __get_user_unknown(); break; \ } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size,segment) \ +#define __get_user_check(x, ptr, size, segment) \ ({ \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ + if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ __gu_err = 0; \ switch (size) { \ case 1: __get_user_8(__gu_addr); break; \ @@ -115,7 +115,7 @@ extern void __get_user_unknown(void); default: __get_user_unknown(); break; \ } \ } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) @@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; }; extern void __put_user_unknown(void); -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_8(x,ptr); break; \ - case 2: __put_user_16(x,ptr); break; \ - case 4: __put_user_32(x,ptr); break; \ - case 8: __put_user_64(x,ptr); break; \ 
+ case 1: __put_user_8(x, ptr); break; \ + case 2: __put_user_16(x, ptr); break; \ + case 4: __put_user_32(x, ptr); break; \ + case 8: __put_user_64(x, ptr); break; \ default: __put_user_unknown(); break; \ } \ __pu_err; \ }) -#define __put_user_check(x,ptr,size,segment) \ +#define __put_user_check(x, ptr, size, segment) \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ + if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ __pu_err = 0; \ switch (size) { \ - case 1: __put_user_8(x,__pu_addr); break; \ - case 2: __put_user_16(x,__pu_addr); break; \ - case 4: __put_user_32(x,__pu_addr); break; \ - case 8: __put_user_64(x,__pu_addr); break; \ + case 1: __put_user_8(x, __pu_addr); break; \ + case 2: __put_user_16(x, __pu_addr); break; \ + case 4: __put_user_32(x, __pu_addr); break; \ + case 8: __put_user_64(x, __pu_addr); break; \ default: __put_user_unknown(); break; \ } \ } \ @@ -237,7 +237,7 @@ extern void __put_user_unknown(void); * instead of writing: this is because they do not write to * any memory gcc knows about, so there are no aliasing issues */ -#define __put_user_64(x,addr) \ +#define __put_user_64(x, addr) \ __asm__ __volatile__("1: stq %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n" \ : "=r"(__pu_err) \ : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) -#define __put_user_32(x,addr) \ +#define __put_user_32(x, addr) \ __asm__ __volatile__("1: stl %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \ #ifdef __alpha_bwx__ /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ -#define __put_user_16(x,addr) \ +#define __put_user_16(x, addr) \ __asm__ __volatile__("1: stw %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n" \ : "=r"(__pu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) -#define __put_user_8(x,addr) \ +#define __put_user_8(x, addr) \ __asm__ __volatile__("1: stb %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ /* Unfortunately, we can't get an unaligned access trap for the sub-word write, so we have to do a general unaligned operation. 
*/ -#define __put_user_16(x,addr) \ +#define __put_user_16(x, addr) \ { \ long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ __asm__ __volatile__( \ @@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ " .long 4b - .\n" \ " lda $31, 5b-4b(%0)\n" \ ".previous" \ - : "=r"(__pu_err), "=&r"(__pu_tmp1), \ - "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ + : "=r"(__pu_err), "=&r"(__pu_tmp1), \ + "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ "=&r"(__pu_tmp4) \ : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ } -#define __put_user_8(x,addr) \ +#define __put_user_8(x, addr) \ { \ long __pu_tmp1, __pu_tmp2; \ __asm__ __volatile__( \ @@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ " .long 2b - .\n" \ " lda $31, 3b-2b(%0)\n" \ ".previous" \ - : "=r"(__pu_err), \ + : "=r"(__pu_err), \ "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ } @@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) : __module_address(__copy_user) "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) - : "$1","$2","$3","$4","$5","$28","memory"); + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); return __cu_len; } @@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali return len; } -#define __copy_to_user(to,from,n) \ +#define __copy_to_user(to, from, n) \ ({ \ __chk_user_ptr(to); \ - __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ + __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ }) -#define __copy_from_user(to,from,n) \ +#define __copy_from_user(to, from, n) \ ({ \ __chk_user_ptr(from); \ - __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ + __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ }) #define __copy_to_user_inatomic __copy_to_user @@ -418,7 +418,7 @@ __clear_user(void __user *to, long len) : "=r"(__cl_len), "=r"(__cl_to) : __module_address(__do_clear_user) "0"(__cl_len), "1"(__cl_to) - : "$1","$2","$3","$4","$5","$28","memory"); + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); return __cl_len; } diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi index a098d7c05e96..cfb5052239a1 100644 --- a/arch/arc/boot/dts/abilis_tb10x.dtsi +++ b/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -112,7 +112,7 @@ chan_allocation_order = <0>; chan_priority = <1>; block_size = <0x7ff>; - data_width = <2 0 0 0>; + data_width = <2>; clocks = <&ahb_clk>; clock-names = "hclk"; }; diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 6cc25ed912ee..2c6248d9a9ef 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -195,6 +195,7 @@ &usb0 { status = "okay"; + dr_mode = "peripheral"; }; &usb1 { diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f9a17e2ca8cb..0198f5a62b96 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -133,20 +133,6 @@ >; }; - i2c1_pins_default: i2c1_pins_default { - pinctrl-single,pins = < - 0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */ - >; - }; - - i2c1_pins_sleep: i2c1_pins_sleep { - pinctrl-single,pins = < - 0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */ - >; - }; - mmc1_pins_default: 
pinmux_mmc1_pins_default { pinctrl-single,pins = < 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ @@ -254,7 +240,7 @@ status = "okay"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&i2c0_pins_default>; - pinctrl-1 = <&i2c0_pins_default>; + pinctrl-1 = <&i2c0_pins_sleep>; clock-frequency = <400000>; at24@50 { @@ -262,17 +248,10 @@ pagesize = <64>; reg = <0x50>; }; -}; - -&i2c1 { - status = "okay"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&i2c1_pins_default>; - pinctrl-1 = <&i2c1_pins_default>; - clock-frequency = <400000>; tps: tps62362@60 { compatible = "ti,tps62362"; + reg = <0x60>; regulator-name = "VDD_MPU"; regulator-min-microvolt = <950000>; regulator-max-microvolt = <1330000>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 03750af3b49a..6463f9ef2b54 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -549,14 +549,6 @@ pinctrl-0 = <&usb1_pins>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb2 { dr_mode = "peripheral"; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 5126f9e77a98..ff5fb6ab0b97 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -70,6 +70,26 @@ }; }; + i2c0: i2c@18008000 { + compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c"; + reg = <0x18008000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>; + clock-frequency = <100000>; + status = "disabled"; + }; + + i2c1: i2c@1800b000 { + compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c"; + reg = <0x1800b000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>; + clock-frequency = <100000>; + status = "disabled"; + }; + uart0: serial@18020000 { compatible = "snps,dw-apb-uart"; reg = <0x18020000 0x100>; diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi index d2d8e94e0aa2..f46329c8ad75 100644 --- a/arch/arm/boot/dts/bcm63138.dtsi +++ b/arch/arm/boot/dts/bcm63138.dtsi @@ -66,8 +66,9 @@ reg = <0x1d000 0x1000>; cache-unified; cache-level = <2>; - cache-sets = <16>; - cache-size = <0x80000>; + cache-size = <524288>; + cache-sets = <1024>; + cache-line-size = <32>; interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>; }; diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index 857d0289ad4d..d3a29c1b8417 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts @@ -35,6 +35,18 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ >; }; + + usb0_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ + >; + }; + + usb1_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */ + >; + }; }; &i2c1 { @@ -127,3 +139,16 @@ &mmc1 { vmmc-supply = <&vmmcsd_fixed>; }; + +/* At least dm8168-evm rev c won't support multipoint, later may */ +&usb0 { + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins>; + mentor,multipoint = <0>; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins>; + mentor,multipoint = <0>; +}; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index d98d0f7de380..3c97b5f2addc 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -97,10 +97,31 @@ /* Device Configuration Registers */ scm_conf: syscon@600 { - compatible = "syscon"; + 
compatible = "syscon", "simple-bus"; reg = <0x600 0x110>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x600 0x110>; + + usb_phy0: usb-phy@20 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x20 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; + + usb_phy1: usb-phy@28 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x28 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; }; scrm_clocks: clocks { @@ -357,7 +378,10 @@ reg-names = "mc", "control"; interrupts = <18>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy0>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; @@ -366,13 +390,15 @@ usb1: usb@47401800 { compatible = "ti,musb-am33xx"; - status = "disabled"; reg = <0x47401c00 0x400 0x47401800 0x200>; reg-names = "mc", "control"; interrupts = <19>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy1>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 746cddb1b8f5..3290a96ba586 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -543,14 +543,6 @@ }; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5827fedafd43..127608d79033 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -249,8 +249,8 @@ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { @@ -1090,8 +1090,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin1>; - clock-names = "sysclk"; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4d8711713610..e0264d0bf7b9 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -380,14 +380,6 @@ phy-supply = <&ldo4_reg>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index 59d1c297bb30..578fa2a54dce 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -87,8 +87,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <64>; + dma-channels = <32>; + dma-requests = <64>; }; i2c1: i2c@48070000 { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 60403273f83e..db80f9d376fa 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -16,6 +16,13 @@ model = "Nokia N900"; compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; + aliases { + i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + }; + cpus { cpu@0 { cpu0-supply = <&vcc>; @@ -704,7 +711,7 @@ compatible = "smsc,lan91c94"; interrupt-parent = <&gpio2>; 
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ - reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ + reg = <1 0 0xf>; /* 16 byte IO range */ bank-width = <2>; pinctrl-names = "default"; pinctrl-0 = <ðernet_pins>; diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 01b71111bd55..f4f78c40b564 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -155,8 +155,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <96>; + dma-channels = <32>; + dma-requests = <96>; }; omap3_pmx_core: pinmux@48002030 { diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 074147cebae4..87401d9f4d8b 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -223,8 +223,8 @@ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4a310000 { diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index b321fdf42c9f..ddff674bd05e 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -238,8 +238,8 @@ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { @@ -929,8 +929,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin>; - clock-names = "sysclk"; + clocks = <&sys_clkin>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; }; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index a6eb5436d26d..40accc87e3a2 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -117,7 +117,7 @@ chan_priority = <1>; block_size = <0xfff>; dma-masters = <2>; - data_width = <3 3 0 0>; + data_width = <3 3>; }; dma@eb000000 { @@ -133,7 +133,7 @@ chan_allocation_order = <1>; chan_priority = <1>; block_size = <0xfff>; - data_width = <3 3 0 0>; + data_width = <3 3>; }; fsmc: flash@b0000000 { diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 8ca3c1a2063d..5c2925831f20 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -294,35 +294,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = 
"allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; ts_clk: clk@01c20098 { @@ -468,8 +476,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -477,8 +491,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <33>; status = "disabled"; }; @@ -486,8 +506,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; @@ -495,8 +521,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <35>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 905f84d141f0..2fd8988f310c 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -218,27 +218,33 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; ts_clk: clk@01c20098 { @@ -368,8 +374,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -377,8 +389,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; 
interrupts = <33>; status = "disabled"; }; @@ -386,8 +404,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 4910393d1b09..f8818f1edbbe 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -257,27 +257,33 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; ts_clk: clk@01c20098 { @@ -391,8 +397,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -400,8 +412,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 47e557656993..fa2f403ccf28 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -190,19 +190,11 @@ clock-output-names = "axi"; }; - ahb1_mux: ahb1_mux@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-ahb1-mux-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; - clock-output-names = "ahb1_mux"; - }; - ahb1: ahb1@01c20054 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-ahb-clk"; + compatible = "allwinner,sun6i-a31-ahb1-clk"; reg = <0x01c20054 0x4>; - clocks = <&ahb1_mux>; + clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; clock-output-names = "ahb1"; }; @@ -265,35 +257,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + 
compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; spi0_clk: clk@01c200a0 { @@ -383,15 +383,21 @@ #dma-cells = <1>; /* DMA controller requires AHB1 clocked from PLL6 */ - assigned-clocks = <&ahb1_mux>; + assigned-clocks = <&ahb1>; assigned-clock-parents = <&pll6 0>; }; mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb1_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 8>; reset-names = "ahb"; interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>; @@ -401,8 +407,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb1_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 9>; reset-names = "ahb"; interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>; @@ -412,8 +424,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb1_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 10>; reset-names = "ahb"; interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>; @@ -423,8 +441,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb1_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 11>; reset-names = "ahb"; interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 786d491542ac..3a8530b79f1c 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -337,35 +337,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: 
clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; ts_clk: clk@01c20098 { @@ -583,8 +591,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -592,8 +606,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -601,8 +621,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -610,8 +636,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index dd34527293e4..382ebd137ee4 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -119,11 +119,19 @@ }; /* dummy clock until actually implemented */ - pll6: pll6_clk { + pll5: pll5_clk { #clock-cells = <0>; compatible = "fixed-clock"; - clock-frequency = <600000000>; - clock-output-names = "pll6"; + clock-frequency = <0>; + clock-output-names = "pll5"; + }; + + pll6: clk@01c20028 { + #clock-cells = <1>; + compatible = "allwinner,sun6i-a31-pll6-clk"; + reg = <0x01c20028 0x4>; + clocks = <&osc24M>; + clock-output-names = "pll6", "pll6x2"; }; cpu: cpu_clk@01c20050 { @@ -149,19 +157,11 @@ clock-output-names = "axi"; }; - ahb1_mux: ahb1_mux_clk@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-ahb1-mux-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>; - clock-output-names = "ahb1_mux"; - }; - ahb1: ahb1_clk@01c20054 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-ahb-clk"; + compatible = "allwinner,sun6i-a31-ahb1-clk"; reg = <0x01c20054 0x4>; - clocks = <&ahb1_mux>; + clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; clock-output-names = "ahb1"; }; @@ -202,7 +202,7 @@ #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 
0x4>; - clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>; + clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>; clock-output-names = "apb2"; }; @@ -218,27 +218,41 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; - clocks = <&osc24M>, <&pll6>; - clock-output-names = "mmc0"; + clocks = <&osc24M>, <&pll6 0>; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; - clocks = <&osc24M>, <&pll6>; - clock-output-names = "mmc1"; + clocks = <&osc24M>, <&pll6 0>; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; - clocks = <&osc24M>, <&pll6>; - clock-output-names = "mmc2"; + clocks = <&osc24M>, <&pll6 0>; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; + }; + + mbus_clk: clk@01c2015c { + #clock-cells = <0>; + compatible = "allwinner,sun8i-a23-mbus-clk"; + reg = <0x01c2015c 0x4>; + clocks = <&osc24M>, <&pll6 1>, <&pll5>; + clock-output-names = "mbus"; }; }; @@ -260,8 +274,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb1_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 8>; reset-names = "ahb"; interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>; @@ -271,8 +291,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb1_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 9>; reset-names = "ahb"; interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>; @@ -282,8 +308,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb1_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 10>; reset-names = "ahb"; interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index e8a4c955241b..b7e6b6fba5e0 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y CONFIG_ARCH_STI=y CONFIG_ARCH_EXYNOS=y CONFIG_EXYNOS5420_MCPM=y +CONFIG_ARCH_SHMOBILE_MULTI=y +CONFIG_ARCH_EMEV2=y +CONFIG_ARCH_R7S72100=y +CONFIG_ARCH_R8A73A4=y +CONFIG_ARCH_R8A7740=y +CONFIG_ARCH_R8A7779=y +CONFIG_ARCH_R8A7790=y +CONFIG_ARCH_R8A7791=y +CONFIG_ARCH_R8A7794=y +CONFIG_ARCH_SH73A0=y +CONFIG_MACH_MARZEN=y CONFIG_ARCH_SUNXI=y CONFIG_ARCH_SIRF=y CONFIG_ARCH_TEGRA=y @@ -84,6 +95,8 @@ CONFIG_PCI_KEYSTONE=y CONFIG_PCI_MSI=y CONFIG_PCI_MVEBU=y CONFIG_PCI_TEGRA=y +CONFIG_PCI_RCAR_GEN2=y +CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCIEPORTBUS=y CONFIG_SMP=y CONFIG_NR_CPUS=8 @@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y 
CONFIG_CMA_SIZE_MBYTES=64 CONFIG_OMAP_OCP2SCP=y +CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y CONFIG_AHCI_TEGRA=y CONFIG_SATA_HIGHBANK=y CONFIG_SATA_MV=y +CONFIG_SATA_RCAR=y CONFIG_NETDEVICES=y CONFIG_HIX5HD2_GMAC=y CONFIG_SUN4I_EMAC=y @@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y CONFIG_KS8851=y CONFIG_R8169=y +CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y CONFIG_MARVELL_PHY=y +CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y CONFIG_ICPLUS_PHY=y +CONFIG_MICREL_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y @@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y +CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMPE=y CONFIG_TOUCHSCREEN_SUN4I=y CONFIG_INPUT_MISC=y CONFIG_INPUT_MPU3050=y CONFIG_INPUT_AXP20X_PEK=y +CONFIG_INPUT_ADXL34X=m CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y @@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_IMX=y CONFIG_SERIAL_IMX_CONSOLE=y +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=20 +CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y @@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_MUX_PINCTRL=y CONFIG_I2C_CADENCE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=m CONFIG_I2C_EXYNOS5=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_RIIC=y CONFIG_I2C_S3C2410=y +CONFIG_I2C_SH_MOBILE=y CONFIG_I2C_SIRF=y -CONFIG_I2C_TEGRA=y CONFIG_I2C_ST=y -CONFIG_SPI=y +CONFIG_I2C_TEGRA=y CONFIG_I2C_XILINX=y -CONFIG_SPI_DAVINCI=y +CONFIG_I2C_RCAR=y +CONFIG_SPI=y CONFIG_SPI_CADENCE=y +CONFIG_SPI_DAVINCI=y CONFIG_SPI_OMAP24XX=y CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y +CONFIG_SPI_RSPI=y +CONFIG_SPI_SH_MSIOF=m +CONFIG_SPI_SH_HSPI=y CONFIG_SPI_SIRF=y CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y @@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y CONFIG_PINCTRL_APQ8084=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_GENERIC_PLATFORM=y -CONFIG_GPIO_DWAPB=y CONFIG_GPIO_DAVINCI=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_EM=y +CONFIG_GPIO_RCAR=y CONFIG_GPIO_XILINX=y CONFIG_GPIO_ZYNQ=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y CONFIG_GPIO_SYSCON=y @@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_KEYSTONE=y CONFIG_POWER_RESET_SUN6I=y +CONFIG_POWER_RESET_RMOBILE=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y +CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_DAVINCI_WATCHDOG CONFIG_ST_THERMAL_SYSCFG=y @@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y CONFIG_ORION_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MESON_WATCHDOG=y +CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y CONFIG_MFD_BCM590XX=y CONFIG_MFD_AXP20X=y @@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y CONFIG_MFD_TPS6586X=y CONFIG_MFD_TPS65910=y CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_AS3711=y CONFIG_REGULATOR_AS3722=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_BCM590XX=y +CONFIG_REGULATOR_DA9210=y CONFIG_REGULATOR_GPIO=y CONFIG_MFD_SYSCON=y CONFIG_POWER_RESET_SYSCON=y CONFIG_REGULATOR_MAX8907=y +CONFIG_REGULATOR_MAX8973=y CONFIG_REGULATOR_MAX77686=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_S2MPS11=y @@ -324,18 +364,32 
@@ CONFIG_REGULATOR_TWL4030=y CONFIG_REGULATOR_VEXPRESS=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=y CONFIG_USB_GSPCA=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_RENESAS_VSP1=m +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7180=m CONFIG_DRM=y +CONFIG_DRM_RCAR_DU=m CONFIG_DRM_TEGRA=y CONFIG_DRM_PANEL_SIMPLE=y CONFIG_FB_ARMCLCD=y CONFIG_FB_WM8505=y +CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SIMPLE=y +CONFIG_FB_SH_MOBILE_MERAM=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y +CONFIG_BACKLIGHT_AS3711=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_SOUND=y @@ -343,6 +397,8 @@ CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_SOC=y +CONFIG_SND_SOC_SH4_FSI=m +CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_TEGRA=y CONFIG_SND_SOC_TEGRA_RT5640=y CONFIG_SND_SOC_TEGRA_WM8753=y @@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y CONFIG_SND_SOC_TEGRA_TRIMSLICE=y CONFIG_SND_SOC_TEGRA_ALC5632=y CONFIG_SND_SOC_TEGRA_MAX98090=y +CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_WM8978=m CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y @@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_STORAGE=y CONFIG_USB_DWC3=y CONFIG_USB_CHIPIDEA=y @@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y CONFIG_USB_MXS_PHY=y +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RCAR_GEN2_PHY=m +CONFIG_USB_GADGET=y +CONFIG_USB_RENESAS_USBHS_UDC=m CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_ARMMMCI=y @@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_MMC_MVSDIO=y -CONFIG_MMC_SUNXI=y +CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_IDMAC=y CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y +CONFIG_MMC_SH_MMCIF=y +CONFIG_MMC_SUNXI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_MAX8907=y CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_PALMAS=y CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_TPS6586X=y CONFIG_RTC_DRV_TPS65910=y +CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_EM3027=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_VT8500=y @@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y CONFIG_DW_DMAC=y CONFIG_MV_XOR=y CONFIG_TEGRA20_APB_DMA=y +CONFIG_SH_DMAE=y +CONFIG_RCAR_AUDMAC_PP=m +CONFIG_RCAR_DMAC=y CONFIG_STE_DMA40=y CONFIG_SIRF_DMA=y CONFIG_TI_EDMA=y @@ -468,6 +539,7 @@ CONFIG_IIO=y CONFIG_XILINX_XADC=y CONFIG_AK8975=y CONFIG_PWM=y +CONFIG_PWM_RENESAS_TPU=y CONFIG_PWM_TEGRA=y CONFIG_PWM_VT8500=y CONFIG_PHY_HIX5HD2_SATA=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b7386524c356..a097cffa1231 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ECC_BCH=y CONFIG_MTD_NAND_OMAP2=y +CONFIG_MTD_NAND_OMAP_BCH=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_OMAP2=y @@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_TI_ABB=y +CONFIG_REGULATOR_TPS62360=m 
CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y CONFIG_REGULATOR_TPS65217=y @@ -374,7 +376,7 @@ CONFIG_PWM_TIEHRPWM=m CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m CONFIG_OMAP_USB2=m -CONFIG_TI_PIPE3=m +CONFIG_TI_PIPE3=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 4767eb9caa78..ce0786efd26c 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs) modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) #define __addr_ok(addr) ({ \ unsigned long flag; \ @@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs) (flag == 0); }) /* We use 33-bit arithmetic here... */ -#define __range_ok(addr,size) ({ \ +#define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ @@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *); #define __GUP_CLOBBER_32t_8 "lr", "cc" #define __GUP_CLOBBER_8 "lr", "cc" -#define __get_user_x(__r2,__p,__e,__l,__s) \ +#define __get_user_x(__r2, __p, __e, __l, __s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ __asmeq("%3", "r1") \ @@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *); /* narrowing a double-word get into a single 32bit word register: */ #ifdef __ARMEB__ -#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ +#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ __get_user_x(__r2, __p, __e, __l, 32t_8) #else #define __get_user_x_32t __get_user_x @@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *); #endif -#define __get_user_check(x,p) \ +#define __get_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ @@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *); __e; \ }) -#define get_user(x,p) \ +#define get_user(x, p) \ ({ \ might_fault(); \ - __get_user_check(x,p); \ + __get_user_check(x, p); \ }) extern int __put_user_1(void *, unsigned int); @@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__l,__s) \ +#define __put_user_x(__r2, __p, __e, __l, __s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ __asmeq("%3", "r1") \ @@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long); : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") -#define __put_user_check(x,p) \ +#define __put_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ const typeof(*(p)) __user *__tmp_p = (p); \ @@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long); __e; \ }) -#define put_user(x,p) \ +#define put_user(x, p) \ ({ \ might_fault(); \ - __put_user_check(x,p); \ + __put_user_check(x, p); \ }) #else /* CONFIG_MMU */ @@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long); */ #define USER_DS KERNEL_DS -#define segment_eq(a,b) (1) -#define __addr_ok(addr) ((void)(addr),1) -#define __range_ok(addr,size) ((void)(addr),0) +#define segment_eq(a, b) (1) +#define __addr_ok(addr) ((void)(addr), 1) +#define __range_ok(addr, size) ((void)(addr), 0) #define get_fs() (KERNEL_DS) static inline void set_fs(mm_segment_t fs) { } -#define get_user(x,p) 
__get_user(x,p) -#define put_user(x,p) __put_user(x,p) +#define get_user(x, p) __get_user(x, p) +#define put_user(x, p) __put_user(x, p) #endif /* CONFIG_MMU */ -#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) +#define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) @@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs) * error occurs, and leave it unchanged on success. Note that these * versions are void (ie, don't return a value as such). */ -#define __get_user(x,ptr) \ +#define __get_user(x, ptr) \ ({ \ long __gu_err = 0; \ - __get_user_err((x),(ptr),__gu_err); \ + __get_user_err((x), (ptr), __gu_err); \ __gu_err; \ }) -#define __get_user_error(x,ptr,err) \ +#define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x),(ptr),err); \ + __get_user_err((x), (ptr), err); \ (void) 0; \ }) -#define __get_user_err(x,ptr,err) \ +#define __get_user_err(x, ptr, err) \ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ might_fault(); \ switch (sizeof(*(ptr))) { \ - case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ - case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ - case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ + case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ + case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ + case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ default: (__gu_val) = __get_user_bad(); \ } \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_asm_byte(x,addr,err) \ +#define __get_user_asm_byte(x, addr, err) \ __asm__ __volatile__( \ "1: " TUSER(ldrb) " %1,[%2],#0\n" \ "2:\n" \ @@ -330,7 +330,7 @@ do { \ : "cc") #ifndef __ARMEB__ -#define __get_user_asm_half(x,__gu_addr,err) \ +#define __get_user_asm_half(x, __gu_addr, err) \ ({ \ unsigned long __b1, __b2; \ __get_user_asm_byte(__b1, __gu_addr, err); \ @@ -338,7 +338,7 @@ do { \ (x) = __b1 | (__b2 << 8); \ }) #else -#define __get_user_asm_half(x,__gu_addr,err) \ +#define __get_user_asm_half(x, __gu_addr, err) \ ({ \ unsigned long __b1, __b2; \ __get_user_asm_byte(__b1, __gu_addr, err); \ @@ -347,7 +347,7 @@ do { \ }) #endif -#define __get_user_asm_word(x,addr,err) \ +#define __get_user_asm_word(x, addr, err) \ __asm__ __volatile__( \ "1: " TUSER(ldr) " %1,[%2],#0\n" \ "2:\n" \ @@ -365,35 +365,35 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") -#define __put_user(x,ptr) \ +#define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x),(ptr),__pu_err); \ + __put_user_err((x), (ptr), __pu_err); \ __pu_err; \ }) -#define __put_user_error(x,ptr,err) \ +#define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x),(ptr),err); \ + __put_user_err((x), (ptr), err); \ (void) 0; \ }) -#define __put_user_err(x,ptr,err) \ +#define __put_user_err(x, ptr, err) \ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ might_fault(); \ switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ - case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ - case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ - case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ + case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ + case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ + case 4: __put_user_asm_word(__pu_val, __pu_addr, err); 
break; \ + case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ default: __put_user_bad(); \ } \ } while (0) -#define __put_user_asm_byte(x,__pu_addr,err) \ +#define __put_user_asm_byte(x, __pu_addr, err) \ __asm__ __volatile__( \ "1: " TUSER(strb) " %1,[%2],#0\n" \ "2:\n" \ @@ -411,22 +411,22 @@ do { \ : "cc") #ifndef __ARMEB__ -#define __put_user_asm_half(x,__pu_addr,err) \ +#define __put_user_asm_half(x, __pu_addr, err) \ ({ \ - unsigned long __temp = (unsigned long)(x); \ + unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp, __pu_addr, err); \ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ }) #else -#define __put_user_asm_half(x,__pu_addr,err) \ +#define __put_user_asm_half(x, __pu_addr, err) \ ({ \ - unsigned long __temp = (unsigned long)(x); \ + unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ __put_user_asm_byte(__temp, __pu_addr + 1, err); \ }) #endif -#define __put_user_asm_word(x,__pu_addr,err) \ +#define __put_user_asm_word(x, __pu_addr, err) \ __asm__ __volatile__( \ "1: " TUSER(str) " %1,[%2],#0\n" \ "2:\n" \ @@ -451,7 +451,7 @@ do { \ #define __reg_oper1 "%R2" #endif -#define __put_user_asm_dword(x,__pu_addr,err) \ +#define __put_user_asm_dword(x, __pu_addr, err) \ __asm__ __volatile__( \ ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ @@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); #else -#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) -#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) -#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) +#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) +#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) +#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) #endif static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index dd9acc95ebc0..61b53c46edfa 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) /* * PMU platform driver and devicetree bindings. */ -static struct of_device_id cpu_pmu_of_device_ids[] = { +static const struct of_device_id cpu_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig index 8423be76080e..52241207a82a 100644 --- a/arch/arm/mach-asm9260/Kconfig +++ b/arch/arm/mach-asm9260/Kconfig @@ -2,5 +2,7 @@ config MACH_ASM9260 bool "Alphascale ASM9260" depends on ARCH_MULTI_V5 select CPU_ARM926T + select ASM9260_TIMER + select GENERIC_CLOCKEVENTS help Support for Alphascale ASM9260 based platform. 
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index c6740e359a44..c74a44324e5b 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -64,7 +64,6 @@ config SOC_SAMA5D4 select SOC_SAMA5 select CLKSRC_MMIO select CACHE_L2X0 - select CACHE_PL310 select HAVE_FB_ATMEL select HAVE_AT91_UTMI select HAVE_AT91_SMD diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 51761f8927b7..b00d09555f2b 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -183,7 +183,7 @@ static struct clock_event_device clkevt = { void __iomem *at91_st_base; EXPORT_SYMBOL_GPL(at91_st_base); -static struct of_device_id at91rm9200_st_timer_ids[] = { +static const struct of_device_id at91rm9200_st_timer_ids[] = { { .compatible = "atmel,at91rm9200-st" }, { /* sentinel */ } }; diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index a6e726a6e0b5..583369ffc284 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void); extern void __init at91sam9g45_pm_init(void); extern void __init at91sam9x5_pm_init(void); #else -void __init at91rm9200_pm_init(void) { } -void __init at91sam9260_pm_init(void) { } -void __init at91sam9g45_pm_init(void) { } -void __init at91sam9x5_pm_init(void) { } +static inline void __init at91rm9200_pm_init(void) { } +static inline void __init at91sam9260_pm_init(void) { } +static inline void __init at91sam9g45_pm_init(void) { } +static inline void __init at91sam9x5_pm_init(void) { } #endif #endif /* _AT91_GENERIC_H */ diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index af8d8afc2e12..5e34fb143309 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void)) } } -static struct of_device_id ramc_ids[] = { +static const struct of_device_id ramc_ids[] __initconst = { { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, @@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = { { /*sentinel*/ } }; -static void at91_dt_ramc(void) +static __init void at91_dt_ramc(void) { struct device_node *np; const struct of_device_id *of_id; diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c index 19e5a1d95397..4db76a493c5a 100644 --- a/arch/arm/mach-axxia/axxia.c +++ b/arch/arm/mach-axxia/axxia.c @@ -16,7 +16,7 @@ #include <linux/init.h> #include <asm/mach/arch.h> -static const char *axxia_dt_match[] __initconst = { +static const char *const axxia_dt_match[] __initconst = { "lsi,axm5516", "lsi,axm5516-sim", "lsi,axm5516-emu", diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index aaeec78c3ec4..8b11f44bb36e 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE This enables support for systems based on Broadcom mobile SoCs. config ARCH_BCM_281XX - bool "Broadcom BCM281XX SoC family" + bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7 select ARCH_BCM_MOBILE select HAVE_SMP help @@ -77,7 +77,7 @@ config ARCH_BCM_281XX variants. 
config ARCH_BCM_21664 - bool "Broadcom BCM21664 SoC family" + bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7 select ARCH_BCM_MOBILE select HAVE_SMP help diff --git a/arch/arm/mach-bcm/brcmstb.c b/arch/arm/mach-bcm/brcmstb.c index 60a5afa06ed7..3a60f7ee3f0c 100644 --- a/arch/arm/mach-bcm/brcmstb.c +++ b/arch/arm/mach-bcm/brcmstb.c @@ -17,7 +17,7 @@ #include <asm/mach-types.h> #include <asm/mach/arch.h> -static const char *brcmstb_match[] __initconst = { +static const char *const brcmstb_match[] __initconst = { "brcm,bcm7445", "brcm,brcmstb", NULL diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 584e8d4e2892..cd30f6f5f2ff 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x config ARCH_DAVINCI_DA830 bool "DA830/OMAP-L137/AM17x based system" + depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR select ARCH_DAVINCI_DA8XX select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1 select CP_INTC config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" + depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR select ARCH_DAVINCI_DA8XX select CP_INTC diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c index f703d82f08a8..438f68547f4c 100644 --- a/arch/arm/mach-davinci/da8xx-dt.c +++ b/arch/arm/mach-davinci/da8xx-dt.c @@ -20,7 +20,7 @@ #define DA8XX_NUM_UARTS 3 -static struct of_device_id da8xx_irq_match[] __initdata = { +static const struct of_device_id da8xx_irq_match[] __initconst = { { .compatible = "ti,cp-intc", .data = cp_intc_of_init, }, { } }; diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c index a8eb909a2b6c..6a2ff0a654a5 100644 --- a/arch/arm/mach-davinci/mux.c +++ b/arch/arm/mach-davinci/mux.c @@ -30,7 +30,7 @@ static void __iomem *pinmux_base; /* * Sets the DAVINCI MUX register based on the table */ -int __init_or_module davinci_cfg_reg(const unsigned long index) +int davinci_cfg_reg(const unsigned long index) { static DEFINE_SPINLOCK(mux_spin_lock); struct davinci_soc_info *soc_info = &davinci_soc_info; @@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index) } EXPORT_SYMBOL(davinci_cfg_reg); -int __init_or_module davinci_cfg_reg_list(const short pins[]) +int davinci_cfg_reg_list(const short pins[]) { int i, error = -EINVAL; diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 2013f73797ed..9e9dfdfad9d7 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static char const *exynos_dt_compat[] __initconst = { +static char const *const exynos_dt_compat[] __initconst = { "samsung,exynos3", "samsung,exynos3250", "samsung,exynos4", diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 666ec3e5b03f..52e2b1a2fddb 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = { .cpu_suspend = exynos5420_cpu_suspend, }; -static struct of_device_id exynos_pmu_of_device_ids[] = { +static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = { { .compatible = "samsung,exynos3250-pmu", .data = &exynos3250_pm_data, diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 07a09570175d..231fba0d03e5 100644 --- a/arch/arm/mach-highbank/highbank.c +++ 
b/arch/arm/mach-highbank/highbank.c @@ -169,7 +169,7 @@ static void __init highbank_init(void) platform_device_register(&highbank_cpuidle_device); } -static const char *highbank_match[] __initconst = { +static const char *const highbank_match[] __initconst = { "calxeda,highbank", "calxeda,ecx-2000", NULL, diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c index 76b907078b58..c6bd7c7bd4aa 100644 --- a/arch/arm/mach-hisi/hisilicon.c +++ b/arch/arm/mach-hisi/hisilicon.c @@ -45,7 +45,7 @@ static void __init hi3620_map_io(void) iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc)); } -static const char *hi3xxx_compat[] __initconst = { +static const char *const hi3xxx_compat[] __initconst = { "hisilicon,hi3620-hi4511", NULL, }; @@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)") .dt_compat = hi3xxx_compat, MACHINE_END -static const char *hix5hd2_compat[] __initconst = { +static const char *const hix5hd2_compat[] __initconst = { "hisilicon,hix5hd2", NULL, }; @@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)") .dt_compat = hix5hd2_compat, MACHINE_END -static const char *hip04_compat[] __initconst = { +static const char *const hip04_compat[] __initconst = { "hisilicon,hip04-d01", NULL, }; @@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)") .dt_compat = hip04_compat, MACHINE_END -static const char *hip01_compat[] __initconst = { +static const char *const hip01_compat[] __initconst = { "hisilicon,hip01", "hisilicon,hip01-ca9x2", NULL, diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index a377f95033ae..0411f0664c15 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void) return ddr_type; } -static struct of_device_id imx_mmdc_dt_ids[] = { +static const struct of_device_id imx_mmdc_dt_ids[] = { { .compatible = "fsl,imx6q-mmdc", }, { /* sentinel */ } }; diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h index 6a722860e34d..b02439019963 100644 --- a/arch/arm/mach-ixp4xx/include/mach/io.h +++ b/arch/arm/mach-ixp4xx/include/mach/io.h @@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr) } #define outsb outsb -static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count) +static inline void outsb(u32 io_addr, const void *p, u32 count) { + const u8 *vaddr = p; + while (count--) outb(*vaddr++, io_addr); } @@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr) } #define outsw outsw -static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count) +static inline void outsw(u32 io_addr, const void *p, u32 count) { + const u16 *vaddr = p; while (count--) outw(cpu_to_le16(*vaddr++), io_addr); } @@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr) } #define outsl outsl -static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count) +static inline void outsl(u32 io_addr, const void *p, u32 count) { + const u32 *vaddr = p; while (count--) outl(cpu_to_le32(*vaddr++), io_addr); } @@ -294,8 +298,9 @@ static inline u8 inb(u32 addr) } #define insb insb -static inline void insb(u32 io_addr, u8 *vaddr, u32 count) +static inline void insb(u32 io_addr, void *p, u32 count) { + u8 *vaddr = p; while (count--) *vaddr++ = inb(io_addr); } @@ -313,8 +318,9 @@ static inline u16 inw(u32 addr) } #define insw insw -static inline void insw(u32 io_addr, u16 *vaddr, u32 count) +static inline void insw(u32 io_addr, void *p, u32 count) { + u16 
*vaddr = p; while (count--) *vaddr++ = le16_to_cpu(inw(io_addr)); } @@ -330,8 +336,9 @@ static inline u32 inl(u32 addr) } #define insl insl -static inline void insl(u32 io_addr, u32 *vaddr, u32 count) +static inline void insl(u32 io_addr, void *p, u32 count) { + u32 *vaddr = p; while (count--) *vaddr++ = le32_to_cpu(inl(io_addr)); } diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index 7f352de26099..06620875813a 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void) pr_info("Switching to high address space at 0x%llx\n", (u64)offset); } -static const char *keystone_match[] __initconst = { +static const char *const keystone_match[] __initconst = { "ti,keystone", NULL, }; diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index ef6041e7e675..41bebfd296dc 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = { .pm_domain = &keystone_pm_domain, }; -static struct of_device_id of_keystone_table[] = { +static const struct of_device_id of_keystone_table[] = { {.compatible = "ti,keystone"}, { /* end of list */ }, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 2756351dbb35..10bfa03e58d4 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -213,7 +213,7 @@ void __init timer_init(int irq) } #ifdef CONFIG_OF -static struct of_device_id mmp_timer_dt_ids[] = { +static const struct of_device_id mmp_timer_dt_ids[] = { { .compatible = "mrvl,mmp-timer", }, {} }; diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 61bfe584a9d7..fc832040c6e9 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -20,6 +20,7 @@ #include <linux/input.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = MSM_GPIO_TO_INT(49), .end = MSM_GPIO_TO_INT(49), - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct platform_device *devices[] __initdata = { diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 4c748616ef47..10016a3bc698 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c @@ -22,6 +22,7 @@ #include <linux/usb/msm_hsusb.h> #include <linux/err.h> #include <linux/clkdev.h> +#include <linux/smc91x.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static int __init msm_init_smc91x(void) 
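The two MSM board hunks above attach a struct smc91x_platdata (SMC91X_USE_16BIT | SMC91X_NOWAIT) through dev.platform_data. The sketch below shows the conventional way a platform driver picks such data up in probe via dev_get_platdata(); it is illustrative only, with a hypothetical probe function name, and is not the smc91x driver's actual code.

/* Illustration only: conventional consumption of platform_data like the
 * smc91x_platdata wired up in the board files above. */
#include <linux/platform_device.h>
#include <linux/smc91x.h>

static int example_smc91x_probe(struct platform_device *pdev)
{
	struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
	unsigned int use_16bit = 0, nowait = 0;

	if (pd) {
		use_16bit = !!(pd->flags & SMC91X_USE_16BIT);
		nowait    = !!(pd->flags & SMC91X_NOWAIT);
	}
	/* configure bus width / wait states from use_16bit and nowait ... */
	return 0;
}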
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index b5895f040caa..e46e9ea1e187 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -51,7 +51,7 @@ enum { COHERENCY_FABRIC_TYPE_ARMADA_380, }; -static struct of_device_id of_coherency_table[] = { +static const struct of_device_id of_coherency_table[] = { {.compatible = "marvell,coherency-fabric", .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP }, {.compatible = "marvell,armada-375-coherency-fabric", diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index d8ab605a44fa..8b9f5e202ccf 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base; static void *mvebu_cpu_resume; -static struct of_device_id of_pmsu_table[] = { +static const struct of_device_id of_pmsu_table[] = { { .compatible = "marvell,armada-370-pmsu", }, { .compatible = "marvell,armada-370-xp-pmsu", }, { .compatible = "marvell,armada-380-pmsu", }, diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index a068cb5c2ce8..c6c132acd7a6 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c @@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev) return -ENODEV; } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7) void mvebu_armada375_smp_wa_init(void) { u32 dev, rev; diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c index 3d24ebf12095..3445a5686805 100644 --- a/arch/arm/mach-nspire/nspire.c +++ b/arch/arm/mach-nspire/nspire.c @@ -27,7 +27,7 @@ #include "mmio.h" #include "clcd.h" -static const char *nspire_dt_match[] __initconst = { +static const char *const nspire_dt_match[] __initconst = { "ti,nspire", "ti,nspire-cx", "ti,nspire-tp", diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 00d5d8f9f150..b83f18fcec9b 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -190,7 +190,7 @@ obj-$(CONFIG_SOC_OMAP2430) += clock2430.o obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o -obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o cclock3xxx_data.o +obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c deleted file mode 100644 index e79c80bbc755..000000000000 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ /dev/null @@ -1,3688 +0,0 @@ -/* - * OMAP3 clock data - * - * Copyright (C) 2007-2012 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - * - * Written by Paul Walmsley - * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com> - * With many device clock fixes by Kevin Hilman and Jouni Högander - * DPLL bypass clock support added by Roman Tereshonkov - * - */ - -/* - * Virtual clocks are introduced as convenient tools. - * They are sources for other clocks and not supposed - * to be requested from drivers directly. 
- */ - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/clk-private.h> -#include <linux/list.h> -#include <linux/io.h> - -#include "soc.h" -#include "iomap.h" -#include "clock.h" -#include "clock3xxx.h" -#include "clock34xx.h" -#include "clock36xx.h" -#include "clock3517.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" -#include "prm3xxx.h" -#include "prm-regbits-34xx.h" -#include "control.h" - -/* - * clocks - */ - -#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR - -/* Maximum DPLL multiplier, divider values for OMAP3 */ -#define OMAP3_MAX_DPLL_MULT 2047 -#define OMAP3630_MAX_JTYPE_DPLL_MULT 4095 -#define OMAP3_MAX_DPLL_DIV 128 - -DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0); - -DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0); - -DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0); - -DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0); - -DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0); - -static const char *osc_sys_ck_parent_names[] = { - "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck", - "virt_38_4m_ck", "virt_16_8m_ck", -}; - -DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0, - OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT, - OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL); - -DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0, - OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT, - OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); - -static struct dpll_data dpll3_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK, - .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = OMAP3430_EN_CORE_DPLL_MASK, - .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_CORE_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll3_ck; - -static const char *dpll3_ck_parent_names[] = { - "sys_ck", - "sys_ck", -}; - -static const struct clk_ops dpll3_ck_ops = { - .init = &omap2_init_clk_clkdm, - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll3_ck_hw = { - .hw = { - .clk = &dpll3_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll3_dd, - .clkdm_name = "dpll3_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops); - -DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT, - 
OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk core_ck; - -static const char *core_ck_parent_names[] = { - "dpll3_m2_ck", -}; - -static const struct clk_ops core_ck_ops = {}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL); -DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops); - -DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk security_l4_ick2; - -static const char *security_l4_ick2_parent_names[] = { - "l4_ick", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL); -DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops); - -static struct clk aes1_ick; - -static const char *aes1_ick_parent_names[] = { - "security_l4_ick2", -}; - -static const struct clk_ops aes1_ick_ops = { - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, -}; - -static struct clk_hw_omap aes1_ick_hw = { - .hw = { - .clk = &aes1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_AES1_SHIFT, -}; - -DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk core_l4_ick; - -static const struct clk_ops core_l4_ick_ops = { - .init = &omap2_init_clk_clkdm, -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk aes2_ick; - -static const char *aes2_ick_parent_names[] = { - "core_l4_ick", -}; - -static const struct clk_ops aes2_ick_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, -}; - -static struct clk_hw_omap aes2_ick_hw = { - .hw = { - .clk = &aes2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_AES2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk dpll1_fck; - -static struct dpll_data dpll1_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL), - .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK, - .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK, - .clk_bypass = &dpll1_fck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL), - .enable_mask = OMAP3430_EN_MPU_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL), - .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL), - .idlest_mask = OMAP3430_ST_MPU_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll1_ck; - -static const struct clk_ops dpll1_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap3_noncore_dpll_enable, - .disable = &omap3_noncore_dpll_disable, - 
.get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .set_rate = &omap3_noncore_dpll_set_rate, - .set_parent = &omap3_noncore_dpll_set_parent, - .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, - .determine_rate = &omap3_noncore_dpll_determine_rate, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll1_ck_hw = { - .hw = { - .clk = &dpll1_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll1_dd, - .clkdm_name = "dpll1_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1); - -DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL), - OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT, - OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk mpu_ck; - -static const char *mpu_ck_parent_names[] = { - "dpll1_x2m2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm"); -DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops); - -DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL), - OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH, - 0x0, NULL); - -static struct clk cam_ick; - -static struct clk_hw_omap cam_ick_hw = { - .hw = { - .clk = &cam_ick, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_CAM_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops); - -/* DPLL4 */ -/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */ -/* Type: DPLL */ -static struct dpll_data dpll4_dd; - -static struct dpll_data dpll4_dd_34xx __initdata = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2), - .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK, - .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct dpll_data dpll4_dd_3630 __initdata = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2), - .mult_mask = OMAP3630_PERIPH_DPLL_MULT_MASK, - .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK, - .dco_mask = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK, - 
.sddiv_mask = OMAP3630_PERIPH_DPLL_SD_DIV_MASK, - .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, - .flags = DPLL_J_TYPE -}; - -static struct clk dpll4_ck; - -static const struct clk_ops dpll4_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap3_noncore_dpll_enable, - .disable = &omap3_noncore_dpll_disable, - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .set_rate = &omap3_dpll4_set_rate, - .set_parent = &omap3_noncore_dpll_set_parent, - .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent, - .determine_rate = &omap3_noncore_dpll_determine_rate, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll4_ck_hw = { - .hw = { - .clk = &dpll4_ck, - }, - .dpll_data = &dpll4_dd, - .ops = &clkhwops_omap3_dpll, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops); - -static const struct clk_div_table dpll4_mx_ck_div_table[] = { - { .div = 1, .val = 1 }, - { .div = 2, .val = 2 }, - { .div = 3, .val = 3 }, - { .div = 4, .val = 4 }, - { .div = 5, .val = 5 }, - { .div = 6, .val = 6 }, - { .div = 7, .val = 7 }, - { .div = 8, .val = 8 }, - { .div = 9, .val = 9 }, - { .div = 10, .val = 10 }, - { .div = 11, .val = 11 }, - { .div = 12, .val = 12 }, - { .div = 13, .val = 13 }, - { .div = 14, .val = 14 }, - { .div = 15, .val = 15 }, - { .div = 16, .val = 16 }, - { .div = 17, .val = 17 }, - { .div = 18, .val = 18 }, - { .div = 19, .val = 19 }, - { .div = 20, .val = 20 }, - { .div = 21, .val = 21 }, - { .div = 22, .val = 22 }, - { .div = 23, .val = 23 }, - { .div = 24, .val = 24 }, - { .div = 25, .val = 25 }, - { .div = 26, .val = 26 }, - { .div = 27, .val = 27 }, - { .div = 28, .val = 28 }, - { .div = 29, .val = 29 }, - { .div = 30, .val = 30 }, - { .div = 31, .val = 31 }, - { .div = 32, .val = 32 }, - { .div = 0 }, -}; - -DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m5x2_ck; - -static const char *dpll4_m5x2_ck_parent_names[] = { - "dpll4_m5_ck", -}; - -static const struct clk_ops dpll4_m5x2_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .set_rate = &omap3_clkoutx2_set_rate, - .recalc_rate = &omap3_clkoutx2_recalc, - .round_rate = &omap3_clkoutx2_round_rate, -}; - -static const struct clk_ops dpll4_m5x2_ck_3630_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore, - .disable = &omap2_dflt_clk_disable, - .recalc_rate = &omap3_clkoutx2_recalc, -}; - -static struct clk_hw_omap dpll4_m5x2_ck_hw = { - .hw = { - .clk = &dpll4_m5x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_CAM_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, - dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); - -static struct clk dpll4_m5x2_ck_3630 = { - .name = "dpll4_m5x2_ck", - .hw = &dpll4_m5x2_ck_hw.hw, - .parent_names = dpll4_m5x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m5x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -static struct clk cam_mclk; - -static const char *cam_mclk_parent_names[] = { - "dpll4_m5x2_ck", -}; - -static 
struct clk_hw_omap cam_mclk_hw = { - .hw = { - .clk = &cam_mclk, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_CAM_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -static struct clk cam_mclk = { - .name = "cam_mclk", - .hw = &cam_mclk_hw.hw, - .parent_names = cam_mclk_parent_names, - .num_parents = ARRAY_SIZE(cam_mclk_parent_names), - .ops = &aes2_ick_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -static const struct clksel_rate clkout2_src_core_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate clkout2_src_sys_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate clkout2_src_96m_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3), - OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m2x2_ck; - -static const char *dpll4_m2x2_ck_parent_names[] = { - "dpll4_m2_ck", -}; - -static struct clk_hw_omap dpll4_m2x2_ck_hw = { - .hw = { - .clk = &dpll4_m2x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_96M_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m2x2_ck_3630 = { - .name = "dpll4_m2x2_ck", - .hw = &dpll4_m2x2_ck_hw.hw, - .parent_names = dpll4_m2x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m2x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -static struct clk omap_96m_alwon_fck; - -static const char *omap_96m_alwon_fck_parent_names[] = { - "dpll4_m2x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL); -DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names, - core_ck_ops); - -static struct clk cm_96m_fck; - -static const char *cm_96m_fck_parent_names[] = { - "omap_96m_alwon_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL); -DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops); - -static const struct clksel_rate clkout2_src_54m_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH, - 0, dpll4_mx_ck_div_table, NULL); - -static struct clk dpll4_m3x2_ck; - -static const char *dpll4_m3x2_ck_parent_names[] = { - "dpll4_m3_ck", -}; - -static struct clk_hw_omap dpll4_m3x2_ck_hw = { - .hw = { - .clk = &dpll4_m3x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_TV_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m3x2_ck_3630 = { - .name = "dpll4_m3x2_ck", - .hw = &dpll4_m3x2_ck_hw.hw, - .parent_names = dpll4_m3x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m3x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -static const char *omap_54m_fck_parent_names[] = { - "dpll4_m3x2_ck", "sys_altclk", -}; - -DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT, - OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL); - 
-static const struct clksel clkout2_src_clksel[] = { - { .parent = &core_ck, .rates = clkout2_src_core_rates }, - { .parent = &sys_ck, .rates = clkout2_src_sys_rates }, - { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates }, - { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates }, - { .parent = NULL }, -}; - -static const char *clkout2_src_ck_parent_names[] = { - "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck", -}; - -static const struct clk_ops clkout2_src_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm", - clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL, - OMAP3430_CLKOUT2SOURCE_MASK, - OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT, - NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops); - -static const struct clksel_rate omap_48m_cm96m_rates[] = { - { .div = 2, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate omap_48m_alt_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel omap_48m_clksel[] = { - { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates }, - { .parent = &sys_altclk, .rates = omap_48m_alt_rates }, - { .parent = NULL }, -}; - -static const char *omap_48m_fck_parent_names[] = { - "cm_96m_fck", "sys_altclk", -}; - -static struct clk omap_48m_fck; - -static const struct clk_ops omap_48m_fck_ops = { - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -static struct clk_hw_omap omap_48m_fck_hw = { - .hw = { - .clk = &omap_48m_fck, - }, - .clksel = omap_48m_clksel, - .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - .clksel_mask = OMAP3430_SOURCE_48M_MASK, -}; - -DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops); - -DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4); - -static struct clk core_12m_fck; - -static const char *core_12m_fck_parent_names[] = { - "omap_12m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops); - -static struct clk core_48m_fck; - -static const char *core_48m_fck_parent_names[] = { - "omap_48m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops); - -static const char *omap_96m_fck_parent_names[] = { - "cm_96m_fck", "sys_ck", -}; - -DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL); - -static struct clk core_96m_fck; - -static const char *core_96m_fck_parent_names[] = { - "omap_96m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops); - -static struct clk core_l3_ick; - -static const char *core_l3_ick_parent_names[] = { - "l3_ick", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm"); -DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops); - -DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1); - -static struct clk corex2_fck; - -static const char 
*corex2_fck_parent_names[] = { - "dpll3_m2x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL); -DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops); - -static const char *cpefuse_fck_parent_names[] = { - "sys_ck", -}; - -static struct clk cpefuse_fck; - -static struct clk_hw_omap cpefuse_fck_hw = { - .hw = { - .clk = &cpefuse_fck, - }, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk csi2_96m_fck; - -static const char *csi2_96m_fck_parent_names[] = { - "core_96m_fck", -}; - -static struct clk_hw_omap csi2_96m_fck_hw = { - .hw = { - .clk = &csi2_96m_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_CSI2_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk d2d_26m_fck; - -static struct clk_hw_omap d2d_26m_fck_hw = { - .hw = { - .clk = &d2d_26m_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES1_EN_D2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk des1_ick; - -static struct clk_hw_omap des1_ick_hw = { - .hw = { - .clk = &des1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_DES1_SHIFT, -}; - -DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk des2_ick; - -static struct clk_hw_omap des2_ick_hw = { - .hw = { - .clk = &des2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_DES2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL), - OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll2_fck; - -static struct dpll_data dpll2_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL), - .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK, - .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK, - .clk_bypass = &dpll2_fck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL), - .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK, - .modes = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) | - (1 << DPLL_LOW_POWER_BYPASS)), - .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL), - .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL), - .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll2_ck; - -static struct clk_hw_omap dpll2_ck_hw = { - .hw = { - .clk = &dpll2_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll2_dd, - .clkdm_name = "dpll2_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll2_ck, 
dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL), - OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL), - OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT, - OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll3_m3x2_ck; - -static const char *dpll3_m3x2_ck_parent_names[] = { - "dpll3_m3_ck", -}; - -static struct clk_hw_omap dpll3_m3x2_ck_hw = { - .hw = { - .clk = &dpll3_m3x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll3_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll3_m3x2_ck_3630 = { - .name = "dpll3_m3x2_ck", - .hw = &dpll3_m3x2_ck_hw.hw, - .parent_names = dpll3_m3x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll3_m3x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1); - -DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH, - 0, dpll4_mx_ck_div_table, NULL); - -static struct clk dpll4_m4x2_ck; - -static const char *dpll4_m4x2_ck_parent_names[] = { - "dpll4_m4_ck", -}; - -static struct clk_hw_omap dpll4_m4x2_ck_hw = { - .hw = { - .clk = &dpll4_m4x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, - dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); - -static struct clk dpll4_m4x2_ck_3630 = { - .name = "dpll4_m4x2_ck", - .hw = &dpll4_m4x2_ck_hw.hw, - .parent_names = dpll4_m4x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m4x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m6x2_ck; - -static const char *dpll4_m6x2_ck_parent_names[] = { - "dpll4_m6_ck", -}; - -static struct clk_hw_omap dpll4_m6x2_ck_hw = { - .hw = { - .clk = &dpll4_m6x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m6x2_ck_3630 = { - .name = "dpll4_m6x2_ck", - .hw = &dpll4_m6x2_ck_hw.hw, - .parent_names = dpll4_m6x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m6x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1); - -static struct dpll_data dpll5_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4), - 
.mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK, - .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2), - .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL), - .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2), - .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll5_ck; - -static struct clk_hw_omap dpll5_ck_hw = { - .hw = { - .clk = &dpll5_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll5_dd, - .clkdm_name = "dpll5_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5), - OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dss1_alwon_fck_3430es1; - -static const char *dss1_alwon_fck_3430es1_parent_names[] = { - "dpll4_m4x2_ck", -}; - -static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = { - .hw = { - .clk = &dss1_alwon_fck_3430es1, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_DSS1_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1, - dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops, - CLK_SET_RATE_PARENT); - -static struct clk dss1_alwon_fck_3430es2; - -static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = { - .hw = { - .clk = &dss1_alwon_fck_3430es2, - }, - .ops = &clkhwops_omap3430es2_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_DSS1_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2, - dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops, - CLK_SET_RATE_PARENT); - -static struct clk dss2_alwon_fck; - -static struct clk_hw_omap dss2_alwon_fck_hw = { - .hw = { - .clk = &dss2_alwon_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_DSS2_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk dss_96m_fck; - -static struct clk_hw_omap dss_96m_fck_hw = { - .hw = { - .clk = &dss_96m_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_TV_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops); - -static struct clk dss_ick_3430es1; - -static struct clk_hw_omap dss_ick_3430es1_hw = { - .hw = { - .clk = &dss_ick_3430es1, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops); - -static struct clk dss_ick_3430es2; - -static struct clk_hw_omap dss_ick_3430es2_hw = { - .hw = { - .clk = &dss_ick_3430es2, - }, - .ops = 
&clkhwops_omap3430es2_iclk_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops); - -static struct clk dss_tv_fck; - -static const char *dss_tv_fck_parent_names[] = { - "omap_54m_fck", -}; - -static struct clk_hw_omap dss_tv_fck_hw = { - .hw = { - .clk = &dss_tv_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_TV_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops); - -static struct clk emac_fck; - -static const char *emac_fck_parent_names[] = { - "rmii_ck", -}; - -static struct clk_hw_omap emac_fck_hw = { - .hw = { - .clk = &emac_fck, - }, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_CPGMAC_FCLK_SHIFT, -}; - -DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops); - -static struct clk ipss_ick; - -static const char *ipss_ick_parent_names[] = { - "core_l3_ick", -}; - -static struct clk_hw_omap ipss_ick_hw = { - .hw = { - .clk = &ipss_ick, - }, - .ops = &clkhwops_am35xx_ipss_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = AM35XX_EN_IPSS_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops); - -static struct clk emac_ick; - -static const char *emac_ick_parent_names[] = { - "ipss_ick", -}; - -static struct clk_hw_omap emac_ick_hw = { - .hw = { - .clk = &emac_ick, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_CPGMAC_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops); - -static struct clk emu_core_alwon_ck; - -static const char *emu_core_alwon_ck_parent_names[] = { - "dpll3_m3x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm"); -DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names, - core_l4_ick_ops); - -static struct clk emu_mpu_alwon_ck; - -static const char *emu_mpu_alwon_ck_parent_names[] = { - "mpu_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL); -DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops); - -static struct clk emu_per_alwon_ck; - -static const char *emu_per_alwon_ck_parent_names[] = { - "dpll4_m6x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm"); -DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names, - core_l4_ick_ops); - -static const char *emu_src_ck_parent_names[] = { - "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck", -}; - -static const struct clksel_rate emu_src_sys_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_core_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_per_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_mpu_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel emu_src_clksel[] = { - { .parent = &sys_ck, .rates = emu_src_sys_rates }, - { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates }, - { .parent = &emu_per_alwon_ck, .rates = 
emu_src_per_rates }, - { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates }, - { .parent = NULL }, -}; - -static const struct clk_ops emu_src_ck_ops = { - .init = &omap2_init_clk_clkdm, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, - .enable = &omap2_clkops_enable_clkdm, - .disable = &omap2_clkops_disable_clkdm, -}; - -static struct clk emu_src_ck; - -static struct clk_hw_omap emu_src_ck_hw = { - .hw = { - .clk = &emu_src_ck, - }, - .clksel = emu_src_clksel, - .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - .clksel_mask = OMAP3430_MUX_CTRL_MASK, - .clkdm_name = "emu_clkdm", -}; - -DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops); - -DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk fac_ick; - -static struct clk_hw_omap fac_ick_hw = { - .hw = { - .clk = &fac_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430ES1_EN_FAC_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk fshostusb_fck; - -static const char *fshostusb_fck_parent_names[] = { - "core_48m_fck", -}; - -static struct clk_hw_omap fshostusb_fck_hw = { - .hw = { - .clk = &fshostusb_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk gfx_l3_ck; - -static struct clk_hw_omap gfx_l3_ck_hw = { - .hw = { - .clk = &gfx_l3_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN), - .enable_bit = OMAP_EN_GFX_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops); - -DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0, - OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL), - OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk gfx_cg1_ck; - -static const char *gfx_cg1_ck_parent_names[] = { - "gfx_l3_fck", -}; - -static struct clk_hw_omap gfx_cg1_ck_hw = { - .hw = { - .clk = &gfx_cg1_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES1_EN_2D_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops); - -static struct clk gfx_cg2_ck; - -static struct clk_hw_omap gfx_cg2_ck_hw = { - .hw = { - .clk = &gfx_cg2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES1_EN_3D_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops); - -static struct clk gfx_l3_ick; - -static const char *gfx_l3_ick_parent_names[] = { - "gfx_l3_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm"); -DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops); - -static struct clk wkup_32k_fck; - -static const char *wkup_32k_fck_parent_names[] = { - "omap_32k_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops); - -static struct 
clk gpio1_dbck; - -static const char *gpio1_dbck_parent_names[] = { - "wkup_32k_fck", -}; - -static struct clk_hw_omap gpio1_dbck_hw = { - .hw = { - .clk = &gpio1_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops); - -static struct clk wkup_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops); - -static struct clk gpio1_ick; - -static const char *gpio1_ick_parent_names[] = { - "wkup_l4_ick", -}; - -static struct clk_hw_omap gpio1_ick_hw = { - .hw = { - .clk = &gpio1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk per_32k_alwon_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm"); -DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names, - core_l4_ick_ops); - -static struct clk gpio2_dbck; - -static const char *gpio2_dbck_parent_names[] = { - "per_32k_alwon_fck", -}; - -static struct clk_hw_omap gpio2_dbck_hw = { - .hw = { - .clk = &gpio2_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk per_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm"); -DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk gpio2_ick; - -static const char *gpio2_ick_parent_names[] = { - "per_l4_ick", -}; - -static struct clk_hw_omap gpio2_ick_hw = { - .hw = { - .clk = &gpio2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio3_dbck; - -static struct clk_hw_omap gpio3_dbck_hw = { - .hw = { - .clk = &gpio3_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio3_ick; - -static struct clk_hw_omap gpio3_ick_hw = { - .hw = { - .clk = &gpio3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio4_dbck; - -static struct clk_hw_omap gpio4_dbck_hw = { - .hw = { - .clk = &gpio4_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio4_ick; - -static struct clk_hw_omap gpio4_ick_hw = { - .hw = { - .clk = &gpio4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio5_dbck; - -static struct 
clk_hw_omap gpio5_dbck_hw = { - .hw = { - .clk = &gpio5_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio5_ick; - -static struct clk_hw_omap gpio5_ick_hw = { - .hw = { - .clk = &gpio5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio6_dbck; - -static struct clk_hw_omap gpio6_dbck_hw = { - .hw = { - .clk = &gpio6_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio6_ick; - -static struct clk_hw_omap gpio6_ick_hw = { - .hw = { - .clk = &gpio6_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpmc_fck; - -static struct clk_hw_omap gpmc_fck_hw = { - .hw = { - .clk = &gpmc_fck, - }, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops); - -static const struct clksel omap343x_gpt_clksel[] = { - { .parent = &omap_32k_fck, .rates = gpt_32k_rates }, - { .parent = &sys_ck, .rates = gpt_sys_rates }, - { .parent = NULL }, -}; - -static const char *gpt10_fck_parent_names[] = { - "omap_32k_fck", "sys_ck", -}; - -DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT10_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt10_ick; - -static struct clk_hw_omap gpt10_ick_hw = { - .hw = { - .clk = &gpt10_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_GPT10_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT11_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt11_ick; - -static struct clk_hw_omap gpt11_ick_hw = { - .hw = { - .clk = &gpt11_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_GPT11_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk gpt12_fck; - -static const char *gpt12_fck_parent_names[] = { - "secure_32k_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops); - -static struct clk gpt12_ick; - -static struct clk_hw_omap gpt12_ick_hw = { - .hw = { - .clk = &gpt12_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT12_SHIFT, - 
.clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT1_MASK, - OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt1_ick; - -static struct clk_hw_omap gpt1_ick_hw = { - .hw = { - .clk = &gpt1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT2_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt2_ick; - -static struct clk_hw_omap gpt2_ick_hw = { - .hw = { - .clk = &gpt2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT3_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt3_ick; - -static struct clk_hw_omap gpt3_ick_hw = { - .hw = { - .clk = &gpt3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT4_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt4_ick; - -static struct clk_hw_omap gpt4_ick_hw = { - .hw = { - .clk = &gpt4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT5_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt5_ick; - -static struct clk_hw_omap gpt5_ick_hw = { - .hw = { - .clk = &gpt5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT6_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk 
gpt6_ick; - -static struct clk_hw_omap gpt6_ick_hw = { - .hw = { - .clk = &gpt6_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT7_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt7_ick; - -static struct clk_hw_omap gpt7_ick_hw = { - .hw = { - .clk = &gpt7_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT7_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT8_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt8_ick; - -static struct clk_hw_omap gpt8_ick_hw = { - .hw = { - .clk = &gpt8_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT8_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT9_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt9_ick; - -static struct clk_hw_omap gpt9_ick_hw = { - .hw = { - .clk = &gpt9_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT9_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk hdq_fck; - -static const char *hdq_fck_parent_names[] = { - "core_12m_fck", -}; - -static struct clk_hw_omap hdq_fck_hw = { - .hw = { - .clk = &hdq_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_HDQ_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops); - -static struct clk hdq_ick; - -static struct clk_hw_omap hdq_ick_hw = { - .hw = { - .clk = &hdq_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HDQ_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk hecc_ck; - -static struct clk_hw_omap hecc_ck_hw = { - .hw = { - .clk = &hecc_ck, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_HECC_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_fck_am35xx; - -static struct clk_hw_omap hsotgusb_fck_am35xx_hw = { - .hw = { - .clk = &hsotgusb_fck_am35xx, - }, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - 
.enable_bit = AM35XX_USBOTG_FCLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_3430es1; - -static struct clk_hw_omap hsotgusb_ick_3430es1_hw = { - .hw = { - .clk = &hsotgusb_ick_3430es1, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_3430es2; - -static struct clk_hw_omap hsotgusb_ick_3430es2_hw = { - .hw = { - .clk = &hsotgusb_ick_3430es2, - }, - .ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_am35xx; - -static struct clk_hw_omap hsotgusb_ick_am35xx_hw = { - .hw = { - .clk = &hsotgusb_ick_am35xx, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_USBOTG_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops); - -static struct clk i2c1_fck; - -static struct clk_hw_omap i2c1_fck_hw = { - .hw = { - .clk = &i2c1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c1_ick; - -static struct clk_hw_omap i2c1_ick_hw = { - .hw = { - .clk = &i2c1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk i2c2_fck; - -static struct clk_hw_omap i2c2_fck_hw = { - .hw = { - .clk = &i2c2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c2_ick; - -static struct clk_hw_omap i2c2_ick_hw = { - .hw = { - .clk = &i2c2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk i2c3_fck; - -static struct clk_hw_omap i2c3_fck_hw = { - .hw = { - .clk = &i2c3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c3_ick; - -static struct clk_hw_omap i2c3_ick_hw = { - .hw = { - .clk = &i2c3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk icr_ick; - -static struct clk_hw_omap icr_ick_hw = { - .hw = { - .clk = &icr_ick, - }, - .ops = 
&clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_ICR_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk iva2_ck; - -static const char *iva2_ck_parent_names[] = { - "dpll2_m2_ck", -}; - -static struct clk_hw_omap iva2_ck_hw = { - .hw = { - .clk = &iva2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT, - .clkdm_name = "iva2_clkdm", -}; - -DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops); - -static struct clk mad2d_ick; - -static struct clk_hw_omap mad2d_ick_hw = { - .hw = { - .clk = &mad2d_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), - .enable_bit = OMAP3430_EN_MAD2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk mailboxes_ick; - -static struct clk_hw_omap mailboxes_ick_hw = { - .hw = { - .clk = &mailboxes_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate common_mcbsp_96m_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate common_mcbsp_mcbsp_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel mcbsp_15_clksel[] = { - { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates }, - { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates }, - { .parent = NULL }, -}; - -static const char *mcbsp1_fck_parent_names[] = { - "core_96m_fck", "mcbsp_clks", -}; - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel, - OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), - OMAP2_MCBSP1_CLKS_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait, - mcbsp1_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp1_ick; - -static struct clk_hw_omap mcbsp1_ick_hw = { - .hw = { - .clk = &mcbsp1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCBSP1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk per_96m_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm"); -DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops); - -static const struct clksel mcbsp_234_clksel[] = { - { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates }, - { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates }, - { .parent = NULL }, -}; - -static const char *mcbsp2_fck_parent_names[] = { - "per_96m_fck", "mcbsp_clks", -}; - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), - OMAP2_MCBSP2_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp2_ick; - -static struct clk_hw_omap mcbsp2_ick_hw = { - .hw = { - .clk = &mcbsp2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP2_SHIFT, - 
.clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP3_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp3_ick; - -static struct clk_hw_omap mcbsp3_ick_hw = { - .hw = { - .clk = &mcbsp3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP4_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp4_ick; - -static struct clk_hw_omap mcbsp4_ick_hw = { - .hw = { - .clk = &mcbsp4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP5_CLKS_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait, - mcbsp1_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp5_ick; - -static struct clk_hw_omap mcbsp5_ick_hw = { - .hw = { - .clk = &mcbsp5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCBSP5_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi1_fck; - -static struct clk_hw_omap mcspi1_fck_hw = { - .hw = { - .clk = &mcspi1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi1_ick; - -static struct clk_hw_omap mcspi1_ick_hw = { - .hw = { - .clk = &mcspi1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi2_fck; - -static struct clk_hw_omap mcspi2_fck_hw = { - .hw = { - .clk = &mcspi2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi2_ick; - -static struct clk_hw_omap mcspi2_ick_hw = { - .hw = { - .clk = &mcspi2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi3_fck; - -static struct clk_hw_omap mcspi3_fck_hw = { - .hw = { - .clk = &mcspi3_fck, - }, 
- .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi3_ick; - -static struct clk_hw_omap mcspi3_ick_hw = { - .hw = { - .clk = &mcspi3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi4_fck; - -static struct clk_hw_omap mcspi4_fck_hw = { - .hw = { - .clk = &mcspi4_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi4_ick; - -static struct clk_hw_omap mcspi4_ick_hw = { - .hw = { - .clk = &mcspi4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs1_fck; - -static struct clk_hw_omap mmchs1_fck_hw = { - .hw = { - .clk = &mmchs1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MMC1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs1_ick; - -static struct clk_hw_omap mmchs1_ick_hw = { - .hw = { - .clk = &mmchs1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MMC1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs2_fck; - -static struct clk_hw_omap mmchs2_fck_hw = { - .hw = { - .clk = &mmchs2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MMC2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs2_ick; - -static struct clk_hw_omap mmchs2_ick_hw = { - .hw = { - .clk = &mmchs2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MMC2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs3_fck; - -static struct clk_hw_omap mmchs3_fck_hw = { - .hw = { - .clk = &mmchs3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs3_ick; - -static struct clk_hw_omap mmchs3_ick_hw = { - .hw = { - .clk = &mmchs3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk modem_fck; - -static struct clk_hw_omap modem_fck_hw = { - .hw = { - .clk = &modem_fck, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = 
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MODEM_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk mspro_fck; - -static struct clk_hw_omap mspro_fck_hw = { - .hw = { - .clk = &mspro_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MSPRO_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mspro_ick; - -static struct clk_hw_omap mspro_ick_hw = { - .hw = { - .clk = &mspro_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MSPRO_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk omap_192m_alwon_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL); -DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names, - core_ck_ops); - -static struct clk omap_32ksync_ick; - -static struct clk_hw_omap omap_32ksync_ick_hw = { - .hw = { - .clk = &omap_32ksync_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_32KSYNC_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate omap_96m_alwon_fck_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_36XX }, - { .div = 2, .val = 2, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel omap_96m_alwon_fck_clksel[] = { - { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates }, - { .parent = NULL } -}; - -static struct clk omap_96m_alwon_fck_3630; - -static const char *omap_96m_alwon_fck_3630_parent_names[] = { - "omap_192m_alwon_fck", -}; - -static const struct clk_ops omap_96m_alwon_fck_3630_ops = { - .set_rate = &omap2_clksel_set_rate, - .recalc_rate = &omap2_clksel_recalc, - .round_rate = &omap2_clksel_round_rate, -}; - -static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = { - .hw = { - .clk = &omap_96m_alwon_fck_3630, - }, - .clksel = omap_96m_alwon_fck_clksel, - .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - .clksel_mask = OMAP3630_CLKSEL_96M_MASK, -}; - -static struct clk omap_96m_alwon_fck_3630 = { - .name = "omap_96m_alwon_fck", - .hw = &omap_96m_alwon_fck_3630_hw.hw, - .parent_names = omap_96m_alwon_fck_3630_parent_names, - .num_parents = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names), - .ops = &omap_96m_alwon_fck_3630_ops, -}; - -static struct clk omapctrl_ick; - -static struct clk_hw_omap omapctrl_ick_hw = { - .hw = { - .clk = &omapctrl_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk per_48m_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm"); 
-DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops); - -static struct clk security_l3_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL); -DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops); - -static struct clk pka_ick; - -static const char *pka_ick_parent_names[] = { - "security_l3_ick", -}; - -static struct clk_hw_omap pka_ick_hw = { - .hw = { - .clk = &pka_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_PKA_SHIFT, -}; - -DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops); - -DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk rng_ick; - -static struct clk_hw_omap rng_ick_hw = { - .hw = { - .clk = &rng_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_RNG_SHIFT, -}; - -DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk sad2d_ick; - -static struct clk_hw_omap sad2d_ick_hw = { - .hw = { - .clk = &sad2d_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SAD2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk sdrc_ick; - -static struct clk_hw_omap sdrc_ick_hw = { - .hw = { - .clk = &sdrc_ick, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SDRC_SHIFT, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate sgx_core_rates[] = { - { .div = 2, .val = 5, .flags = RATE_IN_36XX }, - { .div = 3, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 6, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate sgx_96m_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate sgx_192m_rates[] = { - { .div = 1, .val = 4, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel_rate sgx_corex2_rates[] = { - { .div = 3, .val = 6, .flags = RATE_IN_36XX }, - { .div = 5, .val = 7, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel sgx_clksel[] = { - { .parent = &core_ck, .rates = sgx_core_rates }, - { .parent = &cm_96m_fck, .rates = sgx_96m_rates }, - { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates }, - { .parent = &corex2_fck, .rates = sgx_corex2_rates }, - { .parent = NULL }, -}; - -static const char *sgx_fck_parent_names[] = { - "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck", -}; - -static struct clk sgx_fck; - -static const struct clk_ops sgx_fck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .set_rate = &omap2_clksel_set_rate, - .round_rate = &omap2_clksel_round_rate, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel, - OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL), - OMAP3430ES2_CLKSEL_SGX_MASK, - OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, 
CM_FCLKEN), - OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT, - &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops); - -static struct clk sgx_ick; - -static struct clk_hw_omap sgx_ick_hw = { - .hw = { - .clk = &sgx_ick, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT, - .clkdm_name = "sgx_clkdm", -}; - -DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk sha11_ick; - -static struct clk_hw_omap sha11_ick_hw = { - .hw = { - .clk = &sha11_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_SHA11_SHIFT, -}; - -DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk sha12_ick; - -static struct clk_hw_omap sha12_ick_hw = { - .hw = { - .clk = &sha12_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SHA12_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk sr1_fck; - -static struct clk_hw_omap sr1_fck_hw = { - .hw = { - .clk = &sr1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_SR1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk sr2_fck; - -static struct clk_hw_omap sr2_fck_hw = { - .hw = { - .clk = &sr2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_SR2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk sr_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk ssi_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk ssi_ick_3430es1; - -static const char *ssi_ick_3430es1_parent_names[] = { - "ssi_l4_ick", -}; - -static struct clk_hw_omap ssi_ick_3430es1_hw = { - .hw = { - .clk = &ssi_ick_3430es1, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SSI_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops); - -static struct clk ssi_ick_3430es2; - -static struct clk_hw_omap ssi_ick_3430es2_hw = { - .hw = { - .clk = &ssi_ick_3430es2, - }, - .ops = &clkhwops_omap3430es2_iclk_ssi_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SSI_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops); - -static const struct clksel_rate ssi_ssr_corex2_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 3, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_3XXX }, - { .div = 6, .val = 6, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 8, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel ssi_ssr_clksel[] = { - { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates }, - { .parent = NULL }, -}; - -static const char *ssi_ssr_fck_3430es1_parent_names[] = { - 
"corex2_fck", -}; - -static const struct clk_ops ssi_ssr_fck_3430es1_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .set_rate = &omap2_clksel_set_rate, - .round_rate = &omap2_clksel_round_rate, -}; - -DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm", - ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_SSI_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_SSI_SHIFT, - NULL, ssi_ssr_fck_3430es1_parent_names, - ssi_ssr_fck_3430es1_ops); - -DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm", - ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_SSI_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_SSI_SHIFT, - NULL, ssi_ssr_fck_3430es1_parent_names, - ssi_ssr_fck_3430es1_ops); - -DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1", - &ssi_ssr_fck_3430es1, 0x0, 1, 2); - -DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2", - &ssi_ssr_fck_3430es2, 0x0, 1, 2); - -static struct clk sys_clkout1; - -static const char *sys_clkout1_parent_names[] = { - "osc_sys_ck", -}; - -static struct clk_hw_omap sys_clkout1_hw = { - .hw = { - .clk = &sys_clkout1, - }, - .enable_reg = OMAP3430_PRM_CLKOUT_CTRL, - .enable_bit = OMAP3430_CLKOUT_EN_SHIFT, -}; - -DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops); - -DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0, - OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT, - OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL); - -DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH, - 0x0, NULL); - -DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_TRACECLK_SHIFT, - OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk ts_fck; - -static struct clk_hw_omap ts_fck_hw = { - .hw = { - .clk = &ts_fck, - }, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_TS_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops); - -static struct clk uart1_fck; - -static struct clk_hw_omap uart1_fck_hw = { - .hw = { - .clk = &uart1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_UART1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart1_ick; - -static struct clk_hw_omap uart1_ick_hw = { - .hw = { - .clk = &uart1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_UART1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk uart2_fck; - -static struct clk_hw_omap uart2_fck_hw = { - .hw = { - .clk = &uart2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_UART2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart2_ick; - -static struct clk_hw_omap uart2_ick_hw = { - 
.hw = { - .clk = &uart2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_UART2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk uart3_fck; - -static const char *uart3_fck_parent_names[] = { - "per_48m_fck", -}; - -static struct clk_hw_omap uart3_fck_hw = { - .hw = { - .clk = &uart3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_UART3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops); - -static struct clk uart3_ick; - -static struct clk_hw_omap uart3_ick_hw = { - .hw = { - .clk = &uart3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_UART3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk uart4_fck; - -static struct clk_hw_omap uart4_fck_hw = { - .hw = { - .clk = &uart4_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3630_EN_UART4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops); - -static struct clk uart4_fck_am35xx; - -static struct clk_hw_omap uart4_fck_am35xx_hw = { - .hw = { - .clk = &uart4_fck_am35xx, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = AM35XX_EN_UART4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart4_ick; - -static struct clk_hw_omap uart4_ick_hw = { - .hw = { - .clk = &uart4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3630_EN_UART4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk uart4_ick_am35xx; - -static struct clk_hw_omap uart4_ick_am35xx_hw = { - .hw = { - .clk = &uart4_ick_am35xx, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = AM35XX_EN_UART4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate div2_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel usb_l4_clksel[] = { - { .parent = &l4_ick, .rates = div2_rates }, - { .parent = NULL }, -}; - -static const char *usb_l4_ick_parent_names[] = { - "l4_ick", -}; - -DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - OMAP3430ES1_EN_FSHOSTUSB_SHIFT, - &clkhwops_iclk_wait, usb_l4_ick_parent_names, - ssi_ssr_fck_3430es1_ops); - -static struct clk usbhost_120m_fck; - -static const char *usbhost_120m_fck_parent_names[] = { - "dpll5_m2_ck", -}; - -static struct clk_hw_omap usbhost_120m_fck_hw = { - .hw = { - .clk = &usbhost_120m_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_120m_fck, 
usbhost_120m_fck_parent_names, - aes2_ick_ops); - -static struct clk usbhost_48m_fck; - -static struct clk_hw_omap usbhost_48m_fck_hw = { - .hw = { - .clk = &usbhost_48m_fck, - }, - .ops = &clkhwops_omap3430es2_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops); - -static struct clk usbhost_ick; - -static struct clk_hw_omap usbhost_ick_hw = { - .hw = { - .clk = &usbhost_ick, - }, - .ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops); - -static struct clk usbtll_fck; - -static struct clk_hw_omap usbtll_fck_hw = { - .hw = { - .clk = &usbtll_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops); - -static struct clk usbtll_ick; - -static struct clk_hw_omap usbtll_ick_hw = { - .hw = { - .clk = &usbtll_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), - .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate usim_96m_rates[] = { - { .div = 2, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 5, .flags = RATE_IN_3XXX }, - { .div = 10, .val = 6, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate usim_120m_rates[] = { - { .div = 4, .val = 7, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 8, .flags = RATE_IN_3XXX }, - { .div = 16, .val = 9, .flags = RATE_IN_3XXX }, - { .div = 20, .val = 10, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel usim_clksel[] = { - { .parent = &omap_96m_fck, .rates = usim_96m_rates }, - { .parent = &dpll5_m2_ck, .rates = usim_120m_rates }, - { .parent = &sys_ck, .rates = div2_rates }, - { .parent = NULL }, -}; - -static const char *usim_fck_parent_names[] = { - "omap_96m_fck", "dpll5_m2_ck", "sys_ck", -}; - -static struct clk usim_fck; - -static const struct clk_ops usim_fck_ops = { - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430ES2_CLKSEL_USIMOCP_MASK, - OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait, - usim_fck_parent_names, usim_fck_ops); - -static struct clk usim_ick; - -static struct clk_hw_omap usim_ick_hw = { - .hw = { - .clk = &usim_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk vpfe_fck; - -static const char *vpfe_fck_parent_names[] = { - "pclk_ck", -}; - -static struct clk_hw_omap vpfe_fck_hw = { - .hw 
= { - .clk = &vpfe_fck, - }, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_VPFE_FCLK_SHIFT, -}; - -DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops); - -static struct clk vpfe_ick; - -static struct clk_hw_omap vpfe_ick_hw = { - .hw = { - .clk = &vpfe_ick, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_VPFE_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops); - -static struct clk wdt1_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops); - -static struct clk wdt1_ick; - -static struct clk_hw_omap wdt1_ick_hw = { - .hw = { - .clk = &wdt1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk wdt2_fck; - -static struct clk_hw_omap wdt2_fck_hw = { - .hw = { - .clk = &wdt2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_WDT2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops); - -static struct clk wdt2_ick; - -static struct clk_hw_omap wdt2_ick_hw = { - .hw = { - .clk = &wdt2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk wdt3_fck; - -static struct clk_hw_omap wdt3_fck_hw = { - .hw = { - .clk = &wdt3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_WDT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk wdt3_ick; - -static struct clk_hw_omap wdt3_ick_hw = { - .hw = { - .clk = &wdt3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -/* - * clocks specific to omap3430es1 - */ -static struct omap_clk omap3430es1_clks[] = { - CLK(NULL, "gfx_l3_ck", &gfx_l3_ck), - CLK(NULL, "gfx_l3_fck", &gfx_l3_fck), - CLK(NULL, "gfx_l3_ick", &gfx_l3_ick), - CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck), - CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck), - CLK(NULL, "d2d_26m_fck", &d2d_26m_fck), - CLK(NULL, "fshostusb_fck", &fshostusb_fck), - CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1), - CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1), - CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1), - CLK(NULL, "fac_ick", &fac_ick), - CLK(NULL, "ssi_ick", &ssi_ick_3430es1), - CLK(NULL, "usb_l4_ick", &usb_l4_ick), - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1), - CLK("omapdss_dss", "ick", &dss_ick_3430es1), - CLK(NULL, "dss_ick", &dss_ick_3430es1), -}; - -/* - * clocks specific to am35xx - */ -static struct omap_clk am35xx_clks[] = { - CLK(NULL, "ipss_ick", &ipss_ick), - CLK(NULL, "rmii_ck", &rmii_ck), - CLK(NULL, "pclk_ck", &pclk_ck), - CLK(NULL, "emac_ick", &emac_ick), - CLK(NULL, "emac_fck", &emac_fck), - 
CLK("davinci_emac.0", NULL, &emac_ick), - CLK("davinci_mdio.0", NULL, &emac_fck), - CLK("vpfe-capture", "master", &vpfe_ick), - CLK("vpfe-capture", "slave", &vpfe_fck), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx), - CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx), - CLK(NULL, "hecc_ck", &hecc_ck), - CLK(NULL, "uart4_ick", &uart4_ick_am35xx), - CLK(NULL, "uart4_fck", &uart4_fck_am35xx), -}; - -/* - * clocks specific to omap36xx - */ -static struct omap_clk omap36xx_clks[] = { - CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck), - CLK(NULL, "uart4_fck", &uart4_fck), -}; - -/* - * clocks common to omap36xx omap34xx - */ -static struct omap_clk omap34xx_omap36xx_clks[] = { - CLK(NULL, "aes1_ick", &aes1_ick), - CLK("omap_rng", "ick", &rng_ick), - CLK("omap3-rom-rng", "ick", &rng_ick), - CLK(NULL, "sha11_ick", &sha11_ick), - CLK(NULL, "des1_ick", &des1_ick), - CLK(NULL, "cam_mclk", &cam_mclk), - CLK(NULL, "cam_ick", &cam_ick), - CLK(NULL, "csi2_96m_fck", &csi2_96m_fck), - CLK(NULL, "security_l3_ick", &security_l3_ick), - CLK(NULL, "pka_ick", &pka_ick), - CLK(NULL, "icr_ick", &icr_ick), - CLK("omap-aes", "ick", &aes2_ick), - CLK("omap-sham", "ick", &sha12_ick), - CLK(NULL, "des2_ick", &des2_ick), - CLK(NULL, "mspro_ick", &mspro_ick), - CLK(NULL, "mailboxes_ick", &mailboxes_ick), - CLK(NULL, "ssi_l4_ick", &ssi_l4_ick), - CLK(NULL, "sr1_fck", &sr1_fck), - CLK(NULL, "sr2_fck", &sr2_fck), - CLK(NULL, "sr_l4_ick", &sr_l4_ick), - CLK(NULL, "security_l4_ick2", &security_l4_ick2), - CLK(NULL, "wkup_l4_ick", &wkup_l4_ick), - CLK(NULL, "dpll2_fck", &dpll2_fck), - CLK(NULL, "iva2_ck", &iva2_ck), - CLK(NULL, "modem_fck", &modem_fck), - CLK(NULL, "sad2d_ick", &sad2d_ick), - CLK(NULL, "mad2d_ick", &mad2d_ick), - CLK(NULL, "mspro_fck", &mspro_fck), - CLK(NULL, "dpll2_ck", &dpll2_ck), - CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck), -}; - -/* - * clocks common to omap36xx and omap3430es2plus - */ -static struct omap_clk omap36xx_omap3430es2plus_clks[] = { - CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2), - CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2), - CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2), - CLK(NULL, "ssi_ick", &ssi_ick_3430es2), - CLK(NULL, "usim_fck", &usim_fck), - CLK(NULL, "usim_ick", &usim_ick), -}; - -/* - * clocks common to am35xx omap36xx and omap3430es2plus - */ -static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = { - CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck), - CLK(NULL, "dpll5_ck", &dpll5_ck), - CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck), - CLK(NULL, "sgx_fck", &sgx_fck), - CLK(NULL, "sgx_ick", &sgx_ick), - CLK(NULL, "cpefuse_fck", &cpefuse_fck), - CLK(NULL, "ts_fck", &ts_fck), - CLK(NULL, "usbtll_fck", &usbtll_fck), - CLK(NULL, "usbtll_ick", &usbtll_ick), - CLK("omap_hsmmc.2", "ick", &mmchs3_ick), - CLK(NULL, "mmchs3_ick", &mmchs3_ick), - CLK(NULL, "mmchs3_fck", &mmchs3_fck), - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2), - CLK("omapdss_dss", "ick", &dss_ick_3430es2), - CLK(NULL, "dss_ick", &dss_ick_3430es2), - CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck), - CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck), - CLK(NULL, "usbhost_ick", &usbhost_ick), -}; - -/* - * common clocks - */ -static struct omap_clk omap3xxx_clks[] = { - CLK(NULL, "apb_pclk", &dummy_apb_pclk), - CLK(NULL, "omap_32k_fck", &omap_32k_fck), - CLK(NULL, "virt_12m_ck", &virt_12m_ck), - CLK(NULL, "virt_13m_ck", &virt_13m_ck), - CLK(NULL, "virt_19200000_ck", &virt_19200000_ck), - CLK(NULL, "virt_26000000_ck", &virt_26000000_ck), - CLK(NULL, 
"virt_38_4m_ck", &virt_38_4m_ck), - CLK(NULL, "osc_sys_ck", &osc_sys_ck), - CLK("twl", "fck", &osc_sys_ck), - CLK(NULL, "sys_ck", &sys_ck), - CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck), - CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck), - CLK(NULL, "sys_altclk", &sys_altclk), - CLK(NULL, "mcbsp_clks", &mcbsp_clks), - CLK(NULL, "sys_clkout1", &sys_clkout1), - CLK(NULL, "dpll1_ck", &dpll1_ck), - CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck), - CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck), - CLK(NULL, "dpll3_ck", &dpll3_ck), - CLK(NULL, "core_ck", &core_ck), - CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck), - CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck), - CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck), - CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck), - CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck), - CLK(NULL, "dpll4_ck", &dpll4_ck), - CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck), - CLK(NULL, "omap_96m_fck", &omap_96m_fck), - CLK(NULL, "cm_96m_fck", &cm_96m_fck), - CLK(NULL, "omap_54m_fck", &omap_54m_fck), - CLK(NULL, "omap_48m_fck", &omap_48m_fck), - CLK(NULL, "omap_12m_fck", &omap_12m_fck), - CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck), - CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck), - CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck), - CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck), - CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck), - CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck), - CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck), - CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck), - CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck), - CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck), - CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck), - CLK(NULL, "clkout2_src_ck", &clkout2_src_ck), - CLK(NULL, "sys_clkout2", &sys_clkout2), - CLK(NULL, "corex2_fck", &corex2_fck), - CLK(NULL, "dpll1_fck", &dpll1_fck), - CLK(NULL, "mpu_ck", &mpu_ck), - CLK(NULL, "arm_fck", &arm_fck), - CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck), - CLK(NULL, "l3_ick", &l3_ick), - CLK(NULL, "l4_ick", &l4_ick), - CLK(NULL, "rm_ick", &rm_ick), - CLK(NULL, "gpt10_fck", &gpt10_fck), - CLK(NULL, "gpt11_fck", &gpt11_fck), - CLK(NULL, "core_96m_fck", &core_96m_fck), - CLK(NULL, "mmchs2_fck", &mmchs2_fck), - CLK(NULL, "mmchs1_fck", &mmchs1_fck), - CLK(NULL, "i2c3_fck", &i2c3_fck), - CLK(NULL, "i2c2_fck", &i2c2_fck), - CLK(NULL, "i2c1_fck", &i2c1_fck), - CLK(NULL, "mcbsp5_fck", &mcbsp5_fck), - CLK(NULL, "mcbsp1_fck", &mcbsp1_fck), - CLK(NULL, "core_48m_fck", &core_48m_fck), - CLK(NULL, "mcspi4_fck", &mcspi4_fck), - CLK(NULL, "mcspi3_fck", &mcspi3_fck), - CLK(NULL, "mcspi2_fck", &mcspi2_fck), - CLK(NULL, "mcspi1_fck", &mcspi1_fck), - CLK(NULL, "uart2_fck", &uart2_fck), - CLK(NULL, "uart1_fck", &uart1_fck), - CLK(NULL, "core_12m_fck", &core_12m_fck), - CLK("omap_hdq.0", "fck", &hdq_fck), - CLK(NULL, "hdq_fck", &hdq_fck), - CLK(NULL, "core_l3_ick", &core_l3_ick), - CLK(NULL, "sdrc_ick", &sdrc_ick), - CLK(NULL, "gpmc_fck", &gpmc_fck), - CLK(NULL, "core_l4_ick", &core_l4_ick), - CLK("omap_hsmmc.1", "ick", &mmchs2_ick), - CLK("omap_hsmmc.0", "ick", &mmchs1_ick), - CLK(NULL, "mmchs2_ick", &mmchs2_ick), - CLK(NULL, "mmchs1_ick", &mmchs1_ick), - CLK("omap_hdq.0", "ick", &hdq_ick), - CLK(NULL, "hdq_ick", &hdq_ick), - CLK("omap2_mcspi.4", "ick", &mcspi4_ick), - CLK("omap2_mcspi.3", "ick", &mcspi3_ick), - CLK("omap2_mcspi.2", "ick", &mcspi2_ick), - CLK("omap2_mcspi.1", "ick", &mcspi1_ick), - CLK(NULL, "mcspi4_ick", &mcspi4_ick), - CLK(NULL, "mcspi3_ick", &mcspi3_ick), - CLK(NULL, "mcspi2_ick", &mcspi2_ick), - CLK(NULL, "mcspi1_ick", &mcspi1_ick), - CLK("omap_i2c.3", "ick", &i2c3_ick), - CLK("omap_i2c.2", "ick", &i2c2_ick), - 
CLK("omap_i2c.1", "ick", &i2c1_ick), - CLK(NULL, "i2c3_ick", &i2c3_ick), - CLK(NULL, "i2c2_ick", &i2c2_ick), - CLK(NULL, "i2c1_ick", &i2c1_ick), - CLK(NULL, "uart2_ick", &uart2_ick), - CLK(NULL, "uart1_ick", &uart1_ick), - CLK(NULL, "gpt11_ick", &gpt11_ick), - CLK(NULL, "gpt10_ick", &gpt10_ick), - CLK("omap-mcbsp.5", "ick", &mcbsp5_ick), - CLK("omap-mcbsp.1", "ick", &mcbsp1_ick), - CLK(NULL, "mcbsp5_ick", &mcbsp5_ick), - CLK(NULL, "mcbsp1_ick", &mcbsp1_ick), - CLK(NULL, "omapctrl_ick", &omapctrl_ick), - CLK(NULL, "dss_tv_fck", &dss_tv_fck), - CLK(NULL, "dss_96m_fck", &dss_96m_fck), - CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck), - CLK(NULL, "init_60m_fclk", &dummy_ck), - CLK(NULL, "gpt1_fck", &gpt1_fck), - CLK(NULL, "aes2_ick", &aes2_ick), - CLK(NULL, "wkup_32k_fck", &wkup_32k_fck), - CLK(NULL, "gpio1_dbck", &gpio1_dbck), - CLK(NULL, "sha12_ick", &sha12_ick), - CLK(NULL, "wdt2_fck", &wdt2_fck), - CLK("omap_wdt", "ick", &wdt2_ick), - CLK(NULL, "wdt2_ick", &wdt2_ick), - CLK(NULL, "wdt1_ick", &wdt1_ick), - CLK(NULL, "gpio1_ick", &gpio1_ick), - CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick), - CLK(NULL, "gpt12_ick", &gpt12_ick), - CLK(NULL, "gpt1_ick", &gpt1_ick), - CLK(NULL, "per_96m_fck", &per_96m_fck), - CLK(NULL, "per_48m_fck", &per_48m_fck), - CLK(NULL, "uart3_fck", &uart3_fck), - CLK(NULL, "gpt2_fck", &gpt2_fck), - CLK(NULL, "gpt3_fck", &gpt3_fck), - CLK(NULL, "gpt4_fck", &gpt4_fck), - CLK(NULL, "gpt5_fck", &gpt5_fck), - CLK(NULL, "gpt6_fck", &gpt6_fck), - CLK(NULL, "gpt7_fck", &gpt7_fck), - CLK(NULL, "gpt8_fck", &gpt8_fck), - CLK(NULL, "gpt9_fck", &gpt9_fck), - CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck), - CLK(NULL, "gpio6_dbck", &gpio6_dbck), - CLK(NULL, "gpio5_dbck", &gpio5_dbck), - CLK(NULL, "gpio4_dbck", &gpio4_dbck), - CLK(NULL, "gpio3_dbck", &gpio3_dbck), - CLK(NULL, "gpio2_dbck", &gpio2_dbck), - CLK(NULL, "wdt3_fck", &wdt3_fck), - CLK(NULL, "per_l4_ick", &per_l4_ick), - CLK(NULL, "gpio6_ick", &gpio6_ick), - CLK(NULL, "gpio5_ick", &gpio5_ick), - CLK(NULL, "gpio4_ick", &gpio4_ick), - CLK(NULL, "gpio3_ick", &gpio3_ick), - CLK(NULL, "gpio2_ick", &gpio2_ick), - CLK(NULL, "wdt3_ick", &wdt3_ick), - CLK(NULL, "uart3_ick", &uart3_ick), - CLK(NULL, "uart4_ick", &uart4_ick), - CLK(NULL, "gpt9_ick", &gpt9_ick), - CLK(NULL, "gpt8_ick", &gpt8_ick), - CLK(NULL, "gpt7_ick", &gpt7_ick), - CLK(NULL, "gpt6_ick", &gpt6_ick), - CLK(NULL, "gpt5_ick", &gpt5_ick), - CLK(NULL, "gpt4_ick", &gpt4_ick), - CLK(NULL, "gpt3_ick", &gpt3_ick), - CLK(NULL, "gpt2_ick", &gpt2_ick), - CLK("omap-mcbsp.2", "ick", &mcbsp2_ick), - CLK("omap-mcbsp.3", "ick", &mcbsp3_ick), - CLK("omap-mcbsp.4", "ick", &mcbsp4_ick), - CLK(NULL, "mcbsp4_ick", &mcbsp2_ick), - CLK(NULL, "mcbsp3_ick", &mcbsp3_ick), - CLK(NULL, "mcbsp2_ick", &mcbsp4_ick), - CLK(NULL, "mcbsp2_fck", &mcbsp2_fck), - CLK(NULL, "mcbsp3_fck", &mcbsp3_fck), - CLK(NULL, "mcbsp4_fck", &mcbsp4_fck), - CLK("etb", "emu_src_ck", &emu_src_ck), - CLK(NULL, "emu_src_ck", &emu_src_ck), - CLK(NULL, "pclk_fck", &pclk_fck), - CLK(NULL, "pclkx2_fck", &pclkx2_fck), - CLK(NULL, "atclk_fck", &atclk_fck), - CLK(NULL, "traceclk_src_fck", &traceclk_src_fck), - CLK(NULL, "traceclk_fck", &traceclk_fck), - CLK(NULL, "secure_32k_fck", &secure_32k_fck), - CLK(NULL, "gpt12_fck", &gpt12_fck), - CLK(NULL, "wdt1_fck", &wdt1_fck), - CLK(NULL, "timer_32k_ck", &omap_32k_fck), - CLK(NULL, "timer_sys_ck", &sys_ck), - CLK(NULL, "cpufreq_ck", &dpll1_ck), -}; - -static const char *enable_init_clks[] = { - "sdrc_ick", - "gpmc_fck", - "omapctrl_ick", -}; - -int __init omap3xxx_clk_init(void) -{ - 
	if (omap3_has_192mhz_clk())
-		omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
-	if (cpu_is_omap3630()) {
-		dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
-		dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
-		dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
-		dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
-		dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
-		dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
-	}
-
-	/*
-	 * XXX This type of dynamic rewriting of the clock tree is
-	 * deprecated and should be revised soon.
-	 */
-	if (cpu_is_omap3630())
-		dpll4_dd = dpll4_dd_3630;
-	else
-		dpll4_dd = dpll4_dd_34xx;
-
-
-	/*
-	 * 3505 must be tested before 3517, since 3517 returns true
-	 * for both AM3517 chips and AM3517 family chips, which
-	 * includes 3505. Unfortunately there's no obvious family
-	 * test for 3517/3505 :-(
-	 */
-	if (soc_is_am35xx()) {
-		cpu_mask = RATE_IN_34XX;
-		omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
-		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-				ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-	} else if (cpu_is_omap3630()) {
-		cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
-		omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
-		omap_clocks_register(omap36xx_omap3430es2plus_clks,
-				ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-		omap_clocks_register(omap34xx_omap36xx_clks,
-				ARRAY_SIZE(omap34xx_omap36xx_clks));
-		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-				ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-	} else if (cpu_is_omap34xx()) {
-		if (omap_rev() == OMAP3430_REV_ES1_0) {
-			cpu_mask = RATE_IN_3430ES1;
-			omap_clocks_register(omap3430es1_clks,
-					ARRAY_SIZE(omap3430es1_clks));
-			omap_clocks_register(omap34xx_omap36xx_clks,
-					ARRAY_SIZE(omap34xx_omap36xx_clks));
-			omap_clocks_register(omap3xxx_clks,
-					ARRAY_SIZE(omap3xxx_clks));
-		} else {
-			/*
-			 * Assume that anything that we haven't matched yet
-			 * has 3430ES2-type clocks.
- */ - cpu_mask = RATE_IN_3430ES2PLUS; - omap_clocks_register(omap34xx_omap36xx_clks, - ARRAY_SIZE(omap34xx_omap36xx_clks)); - omap_clocks_register(omap36xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_omap3430es2plus_clks)); - omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks)); - omap_clocks_register(omap3xxx_clks, - ARRAY_SIZE(omap3xxx_clks)); - } - } else { - WARN(1, "clock: could not identify OMAP3 variant\n"); - } - - omap2_clk_disable_autoidle_all(); - - omap2_clk_enable_init_clocks(enable_init_clks, - ARRAY_SIZE(enable_init_clks)); - - pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n", - (clk_get_rate(&osc_sys_ck) / 1000000), - (clk_get_rate(&osc_sys_ck) / 100000) % 10, - (clk_get_rate(&core_ck) / 1000000), - (clk_get_rate(&arm_fck) / 1000000)); - - /* - * Lock DPLL5 -- here only until other device init code can - * handle this - */ - if (omap_rev() >= OMAP3430_REV_ES2_0) - omap3_clk_lock_dpll5(); - - /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */ - sdrc_ick_p = clk_get(NULL, "sdrc_ick"); - arm_fck_p = clk_get(NULL, "arm_fck"); - - return 0; -} diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index 4ae4ccebced2..6124db5c37ae 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -23,7 +23,6 @@ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/bitops.h> -#include <linux/clk-private.h> #include <asm/cpu.h> #include <trace/events/power.h> @@ -633,21 +632,6 @@ const struct clk_hw_omap_ops clkhwops_wait = { }; /** - * omap_clocks_register - register an array of omap_clk - * @ocs: pointer to an array of omap_clk to register - */ -void __init omap_clocks_register(struct omap_clk oclks[], int cnt) -{ - struct omap_clk *c; - - for (c = oclks; c < oclks + cnt; c++) { - clkdev_add(&c->lk); - if (!__clk_init(NULL, c->lk.clk)) - omap2_init_clk_hw_omap_clocks(c->lk.clk); - } -} - -/** * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument * @mpurate_ck_name: clk name of the clock to change rate * diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index 1cf9dd85248a..a56742f96000 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -40,23 +40,29 @@ struct omap_clk { struct clockdomain; #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \ - static struct clk _name = { \ + static struct clk_core _name##_core = { \ .name = #_name, \ .hw = &_name##_hw.hw, \ .parent_names = _parent_array_name, \ .num_parents = ARRAY_SIZE(_parent_array_name), \ .ops = &_clkops_name, \ + }; \ + static struct clk _name = { \ + .core = &_name##_core, \ }; #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \ _clkops_name, _flags) \ - static struct clk _name = { \ + static struct clk_core _name##_core = { \ .name = #_name, \ .hw = &_name##_hw.hw, \ .parent_names = _parent_array_name, \ .num_parents = ARRAY_SIZE(_parent_array_name), \ .ops = &_clkops_name, \ .flags = _flags, \ + }; \ + static struct clk _name = { \ + .core = &_name##_core, \ }; #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \ @@ -238,7 +244,6 @@ struct ti_clk_features { extern struct ti_clk_features ti_clk_features; extern const struct clkops clkops_omap2_dflt_wait; -extern const struct clkops clkops_dummy; extern const struct clkops clkops_omap2_dflt; extern struct clk_functions omap2_clk_functions; @@ -247,7 +252,6 @@ extern const struct clksel_rate gpt_32k_rates[]; extern const struct clksel_rate 
gpt_sys_rates[]; extern const struct clksel_rate gfx_l3_rates[]; extern const struct clksel_rate dsp_ick_rates[]; -extern struct clk dummy_ck; extern const struct clk_hw_omap_ops clkhwops_iclk_wait; extern const struct clk_hw_omap_ops clkhwops_wait; @@ -272,7 +276,5 @@ extern void __iomem *clk_memmaps[]; extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); -extern void omap_clocks_register(struct omap_clk *oclks, int cnt); - void __init ti_clk_init_features(void); #endif diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c index ef4d21bfb964..61b60dfb14ce 100644 --- a/arch/arm/mach-omap2/clock_common_data.c +++ b/arch/arm/mach-omap2/clock_common_data.c @@ -16,7 +16,6 @@ * OMAP3xxx clock definition files. */ -#include <linux/clk-private.h> #include "clock.h" /* clksel_rate data common to 24xx/343x */ @@ -114,13 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = { { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, { .div = 0 }, }; - -/* Clocks shared between various OMAP SoCs */ - -static struct clk_ops dummy_ck_ops = {}; - -struct clk dummy_ck = { - .name = "dummy_clk", - .ops = &dummy_ck_ops, - .flags = CLK_IS_BASIC, -}; diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index c2da2a0fe5ad..44e57ec225d4 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) struct clk_hw_omap *clk = to_clk_hw_omap(hw); int r; struct dpll_data *dd; - struct clk *parent; + struct clk_hw *parent; dd = clk->dpll_data; if (!dd) @@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) } } - parent = __clk_get_parent(hw->clk); + parent = __clk_get_hw(__clk_get_parent(hw->clk)); if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) { - WARN_ON(parent != dd->clk_bypass); + WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); r = _omap3_noncore_dpll_bypass(clk); } else { - WARN_ON(parent != dd->clk_ref); + WARN_ON(parent != __clk_get_hw(dd->clk_ref)); r = _omap3_noncore_dpll_lock(clk); } @@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw) * in failure. */ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -549,7 +551,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, if (!dd) return -EINVAL; - if (__clk_get_parent(hw->clk) != dd->clk_ref) + if (__clk_get_hw(__clk_get_parent(hw->clk)) != + __clk_get_hw(dd->clk_ref)) return -EINVAL; if (dd->last_rounded_rate == 0) diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c index fc712240e5fd..f231be05b9a6 100644 --- a/arch/arm/mach-omap2/dpll44xx.c +++ b/arch/arm/mach-omap2/dpll44xx.c @@ -202,6 +202,8 @@ out: * in failure. 
*/ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index e60780f05374..c4871c55bd8b 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -461,7 +461,17 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_soc_init = omap3xxx_clk_init; + if (!of_have_populated_dt()) { + omap3_prcm_legacy_iomaps_init(); + if (soc_is_am35xx()) + omap_clk_soc_init = am35xx_clk_legacy_init; + else if (cpu_is_omap3630()) + omap_clk_soc_init = omap36xx_clk_legacy_init; + else if (omap_rev() == OMAP3430_REV_ES1_0) + omap_clk_soc_init = omap3430es1_clk_legacy_init; + else + omap_clk_soc_init = omap3430_clk_legacy_init; + } } void __init omap3430_init_early(void) @@ -753,15 +763,17 @@ int __init omap_clk_init(void) ti_clk_init_features(); - ret = of_prcm_init(); - if (ret) - return ret; + if (of_have_populated_dt()) { + ret = of_prcm_init(); + if (ret) + return ret; - of_clk_init(NULL); + of_clk_init(NULL); - ti_dt_clk_init_retry_clks(); + ti_dt_clk_init_retry_clks(); - ti_dt_clockdomains_setup(); + ti_dt_clockdomains_setup(); + } ret = omap_clk_soc_init(); diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 2418bdf28ca2..cee0fe1ee6ff 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void) } omap_early_initcall(omap4_sar_ram_init); -static struct of_device_id gic_match[] = { +static const struct of_device_id gic_match[] = { { .compatible = "arm,cortex-a9-gic", }, { .compatible = "arm,cortex-a15-gic", }, { }, diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h index 77752e49d8d4..b9061a6a2db8 100644 --- a/arch/arm/mach-omap2/prm.h +++ b/arch/arm/mach-omap2/prm.h @@ -20,6 +20,7 @@ extern void __iomem *prm_base; extern u16 prm_features; extern void omap2_set_globals_prm(void __iomem *prm); int of_prcm_init(void); +void omap3_prcm_legacy_iomaps_init(void); # endif /* diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index c5e00c6714b1..5713bbdf83bc 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void) return prm_register(&omap3xxx_prm_ll_data); } -static struct of_device_id omap3_prm_dt_match_table[] = { +static const struct of_device_id omap3_prm_dt_match_table[] = { { .compatible = "ti,omap3-prm" }, { } }; diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 408c64efb807..a08a617a6c11 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void) return prm_register(&omap44xx_prm_ll_data); } -static struct of_device_id omap_prm_dt_match_table[] = { +static const struct of_device_id omap_prm_dt_match_table[] = { { .compatible = "ti,omap4-prm" }, { .compatible = "ti,omap5-prm" }, { .compatible = "ti,dra7-prm" }, diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 264b5e29404d..bfaa7ba595cc 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -35,6 +35,8 @@ #include "prm44xx.h" #include "common.h" #include "clock.h" +#include "cm.h" +#include "control.h" /* * OMAP_PRCM_MAX_NR_PENDING_REG: maximum 
number of PRM_IRQ*_MPU regs @@ -641,6 +643,15 @@ int __init of_prcm_init(void) return 0; } +void __init omap3_prcm_legacy_iomaps_init(void) +{ + ti_clk_ll_ops = &omap_clk_ll_ops; + + clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD; + clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD; + clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get(); +} + static int __init prm_late_init(void) { if (prm_ll_data->late_init) diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig index a219dc310d5d..e03d8b5c9ad0 100644 --- a/arch/arm/mach-prima2/Kconfig +++ b/arch/arm/mach-prima2/Kconfig @@ -27,7 +27,6 @@ config ARCH_ATLAS7 select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_SMP - select SMP_ON_UP if SMP help Support for CSR SiRFSoC ARM Cortex A7 Platform diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 0c819bb88418..8cadb302a7d2 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c @@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void) } #ifdef CONFIG_ARCH_ATLAS6 -static const char *atlas6_dt_match[] __initconst = { +static const char *const atlas6_dt_match[] __initconst = { "sirf,atlas6", NULL }; @@ -36,7 +36,7 @@ MACHINE_END #endif #ifdef CONFIG_ARCH_PRIMA2 -static const char *prima2_dt_match[] __initconst = { +static const char *const prima2_dt_match[] __initconst = { "sirf,prima2", NULL }; @@ -52,7 +52,7 @@ MACHINE_END #endif #ifdef CONFIG_ARCH_ATLAS7 -static const char *atlas7_dt_match[] __initdata = { +static const char *const atlas7_dt_match[] __initconst = { "sirf,atlas7", NULL }; diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index fc2b03c81e5f..e46c91094dde 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu) spin_unlock(&boot_lock); } -static struct of_device_id clk_ids[] = { +static const struct of_device_id clk_ids[] = { { .compatible = "sirf,atlas7-clkc" }, {}, }; diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 343c4e3a7c5d..7d8eab857a93 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = { } }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static void idp_backlight_power(int on) diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index ad777b353bd5..28da319d389f 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -24,6 +24,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/pwm_backlight.h> +#include <linux/smc91x.h> #include <asm/types.h> #include <asm/setup.h> @@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = LPD270_ETHERNET_IRQ, .end = LPD270_ETHERNET_IRQ, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; +struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct resource lpd270_flash_resources[] = { diff --git
a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 850e506926df..c309593abdb2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -28,6 +28,7 @@ #include <linux/platform_data/video-clcd-versatile.h> #include <linux/io.h> #include <linux/smsc911x.h> +#include <linux/smc91x.h> #include <linux/ata_platform.h> #include <linux/amba/mmci.h> #include <linux/gfp.h> @@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, +}; + static struct platform_device realview_eth_device = { .name = "smsc911x", .id = 0, @@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res) realview_eth_device.resource = res; if (strcmp(realview_eth_device.name, "smsc911x") == 0) realview_eth_device.dev.platform_data = &smsc911x_config; + else + realview_eth_device.dev.platform_data = &smc91x_platdata; return platform_device_register(&realview_eth_device); } diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 64c88d657f9e..b3869cbbcc68 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = { [1] = { .start = IRQ_EB_ETH, .end = IRQ_EB_ETH, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig index 5078932c1683..ae4eb7cc4bcc 100644 --- a/arch/arm/mach-rockchip/Kconfig +++ b/arch/arm/mach-rockchip/Kconfig @@ -11,6 +11,7 @@ config ARCH_ROCKCHIP select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select DW_APB_TIMER_OF + select REGULATOR if PM select ROCKCHIP_TIMER select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h index 7d752ff39f91..7c889c04604b 100644 --- a/arch/arm/mach-rockchip/pm.h +++ b/arch/arm/mach-rockchip/pm.h @@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data; extern unsigned long rk3288_bootram_sz; void rockchip_slp_cpu_resume(void); +#ifdef CONFIG_PM_SLEEP void __init rockchip_suspend_init(void); +#else +static inline void rockchip_suspend_init(void) +{ +} +#endif /****** following is rk3288 defined **********/ #define RK3288_PMU_WAKEUP_CFG0 0x00 diff --git a/arch/arm/mach-s5pv210/s5pv210.c b/arch/arm/mach-s5pv210/s5pv210.c index 43eb1eaea0c9..83e656ea95ae 100644 --- a/arch/arm/mach-s5pv210/s5pv210.c +++ b/arch/arm/mach-s5pv210/s5pv210.c @@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void) s5pv210_pm_init(); } -static char const *s5pv210_dt_compat[] __initconst = { +static char const *const s5pv210_dt_compat[] __initconst = { "samsung,s5pc110", "samsung,s5pv210", NULL diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 169262e3040d..7b0cd3172354 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -12,6 +12,7 @@ #include <linux/pm.h> #include <linux/serial_core.h> #include <linux/slab.h> +#include <linux/smc91x.h> #include <asm/mach-types.h> #include <asm/mach/map.h> @@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev) 0x02000000, "smc91x-attrib"), { .flags = IORESOURCE_IRQ }, }; + struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT, + }; struct platform_device_info 
smc91x_devinfo = { .parent = &dev->dev, .name = "smc91x", .id = 0, .res = smc91x_resources, .num_res = ARRAY_SIZE(smc91x_resources), + .data = &smc91x_platdata, + .size_data = sizeof(smc91x_platdata), }; int ret, irq; diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 091261878eff..696fd0fe4806 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c @@ -11,6 +11,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/mtd/partitions.h> +#include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/setup.h> @@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = { #endif }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev = { + .platform_data = &smc91x_platdata, + }, }; static struct platform_device *devices[] __initdata = { diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c index aad97be9cbe1..37f7b15c01bc 100644 --- a/arch/arm/mach-shmobile/setup-emev2.c +++ b/arch/arm/mach-shmobile/setup-emev2.c @@ -37,7 +37,7 @@ static void __init emev2_map_io(void) iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc)); } -static const char *emev2_boards_compat_dt[] __initconst = { +static const char *const emev2_boards_compat_dt[] __initconst = { "renesas,emev2", NULL, }; diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig index 8825bc9e2553..3b1ac463a494 100644 --- a/arch/arm/mach-sti/Kconfig +++ b/arch/arm/mach-sti/Kconfig @@ -13,6 +13,7 @@ menuconfig ARCH_STI select ARM_ERRATA_775420 select PL310_ERRATA_753970 if CACHE_L2X0 select PL310_ERRATA_769419 if CACHE_L2X0 + select RESET_CONTROLLER help Include support for STiH41x SOCs like STiH415/416 using the device tree for discovery diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index ef016af1c9e7..914341bcef25 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -91,8 +91,6 @@ static void __init tegra_dt_init(void) struct soc_device *soc_dev; struct device *parent = NULL; - tegra_clocks_apply_init_table(); - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) goto out; diff --git a/arch/arm/mach-ux500/pm_domains.c b/arch/arm/mach-ux500/pm_domains.c index 0d4b5b46f15b..4d71c90f801c 100644 --- a/arch/arm/mach-ux500/pm_domains.c +++ b/arch/arm/mach-ux500/pm_domains.c @@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = { [DOMAIN_VAPE] = &ux500_pm_domain_vape, }; -static struct of_device_id ux500_pm_domain_matches[] = { +static const struct of_device_id ux500_pm_domain_matches[] __initconst = { { .compatible = "stericsson,ux500-pm-domains", }, { }, }; diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c index 9f9bc61ca64b..7de3e92a13b0 100644 --- a/arch/arm/mach-versatile/versatile_dt.c +++ b/arch/arm/mach-versatile/versatile_dt.c @@ -35,7 +35,7 @@ static void __init versatile_dt_init(void) versatile_auxdata_lookup, NULL); } -static const char *versatile_dt_match[] __initconst = { +static const char *const versatile_dt_match[] __initconst = { "arm,versatile-ab", "arm,versatile-pb", NULL, diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index d6b16d9a7838..3c2509b4b694 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig @@ -73,6 +73,7
@@ config ARCH_VEXPRESS_TC2_PM depends on MCPM select ARM_CCI select ARCH_VEXPRESS_SPC + select ARM_CPU_SUSPEND help Support for CPU and cluster power management on Versatile Express with a TC2 (A15x2 A7x3) big.LITTLE core tile. diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c43c71455566..9b4f29e595a4 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -892,13 +892,6 @@ config CACHE_L2X0 if CACHE_L2X0 -config CACHE_PL310 - bool - default y if CPU_V7 && !(CPU_V6 || CPU_V6K) - help - This option enables optimisations for the PL310 cache - controller. - config PL310_ERRATA_588369 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" help diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 903dba064a03..170a116d1b29 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, int i = 0; if (array_size <= PAGE_SIZE) - pages = kzalloc(array_size, gfp); + pages = kzalloc(array_size, GFP_KERNEL); else pages = vzalloc(array_size); if (!pages) diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts index 27f32962e55c..4eac8dcea423 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dts +++ b/arch/arm64/boot/dts/arm/foundation-v8.dts @@ -34,6 +34,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ -41,6 +42,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -48,6 +50,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -55,6 +58,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index d429129ecb3d..133ee59de2d7 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -39,6 +39,7 @@ reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A57_1: cpu@1 { @@ -46,6 +47,7 @@ reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A53_0: cpu@100 { @@ -53,6 +55,7 @@ reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_1: cpu@101 { @@ -60,6 +63,7 @@ reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_2: cpu@102 { @@ -67,6 +71,7 @@ reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_3: cpu@103 { @@ -74,6 +79,15 @@ reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A57_L2: l2-cache0 { + compatible = "cache"; + }; + + A53_L2: l2-cache1 { + compatible = "cache"; }; }; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index efc59b3baf63..20addabbd127 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -37,6 +37,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ 
-44,6 +45,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -51,6 +53,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -58,6 +61,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 5720608c50b1..abb79b3cfcfe 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o aes-neon-blk-y := aes-glue-neon.o aes-neon.o -AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE +AFLAGS_aes-ce.o := -DINTERLEAVE=4 AFLAGS_aes-neon.o := -DINTERLEAVE=4 CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5901480bfdca..750bac4e637e 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -20,6 +20,9 @@ #error "Only include this from assembly code" #endif +#ifndef __ASM_ASSEMBLER_H +#define __ASM_ASSEMBLER_H + #include <asm/ptrace.h> #include <asm/thread_info.h> @@ -155,3 +158,5 @@ lr .req x30 // link register #endif orr \rd, \lbits, \hbits, lsl #32 .endm + +#endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 0710654631e7..c60643f14cda 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -1,6 +1,8 @@ #ifndef __ASM_CPUIDLE_H #define __ASM_CPUIDLE_H +#include <asm/proc-fns.h> + #ifdef CONFIG_CPU_IDLE extern int cpu_init_idle(unsigned int cpu); extern int cpu_suspend(unsigned long arg); diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index e2ff32a93b5c..d2f49423c5dc 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) -__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) -__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) +__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) +__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) +__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) +__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 16449c535e50..800ec0e87ed9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_VALID | PTE_WRITE; + PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f9be30ea1cbd..20e9591a60cf 100644 --- 
a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -45,7 +45,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ -#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK +extern phys_addr_t arm64_dma_phys_limit; +#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) #endif /* __KERNEL__ */ struct debug_info { diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 73f0ce570fb3..4abe9b945f77 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -24,11 +24,6 @@ #include <linux/sched.h> #include <asm/cputype.h> -extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); -extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); - -extern struct cpu_tlb_fns cpu_tlb; - /* * TLB Management * ============== diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 3bf8f4e99a51..07e1ba449bf1 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) /* * Return 1 if addr < current->addr_limit, 0 otherwise. @@ -147,7 +147,7 @@ do { \ default: \ BUILD_BUG(); \ } \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user(x, ptr) \ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bef04afd6031..5ee07eee80c2 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ - cpuinfo.o cpu_errata.o alternative.o cacheinfo.o + hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + return_address.o cpuinfo.o cpu_errata.o \ + alternative.o cacheinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o entry32.o \ diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index cf8556ae09d0..c851be795080 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable) branch = aarch64_insn_gen_branch_imm(pc, (unsigned long)ftrace_graph_caller, - AARCH64_INSN_BRANCH_LINK); + AARCH64_INSN_BRANCH_NOLINK); nop = aarch64_insn_gen_nop(); if (enable) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 27d4864577e5..c8eca88f12e6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); - else + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) page = virt_to_page(addr); + else + return addr; BUG_ON(!page); set_fixmap(fixmap, page_to_phys(page)); diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S new file mode 100644 index 000000000000..cf83e61cd3b5 --- /dev/null +++ b/arch/arm64/kernel/psci-call.S @@ -0,0 +1,28 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2015 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/linkage.h> + +/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_hvc) + hvc #0 + ret +ENDPROC(__invoke_psci_fn_hvc) + +/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_smc) + smc #0 + ret +ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f311c49e..9b8a70ae64a1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -57,6 +57,9 @@ static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); typedef int (*psci_initcall_t)(const struct device_node *); +asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); +asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); + enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, @@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state, PSCI_0_2_POWER_STATE_AFFL_SHIFT; } -/* - * The following two functions are invoked via the invoke_psci_fn pointer - * and will not be inlined, allowing us to piggyback on the AAPCS. - */ -static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "hvc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - -static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "smc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - static int psci_get_version(void) { int err; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c20a300e2213..d26fcd4cd6e6 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, - &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); @@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_SYS: err |= __put_user((compat_uptr_t)(unsigned long) diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index fe652ffd34c2..efa79e8d4196 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime) /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ ENTRY(__kernel_clock_getres) .cfi_startproc - cbz w1, 3f - cmp w0, 
#CLOCK_REALTIME ccmp w0, #CLOCK_MONOTONIC, #0x4, ne b.ne 1f @@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: + cbz w1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0a24b9b8c698..58e0c2bdde04 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, }; -extern int swiotlb_late_init_with_default_size(size_t default_size); - static int __init atomic_pool_init(void) { pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -411,21 +409,13 @@ out: return -ENOMEM; } -static int __init swiotlb_late_init(void) +static int __init arm64_dma_init(void) { - size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); + int ret; dma_ops = &swiotlb_dma_ops; - return swiotlb_late_init_with_default_size(swiotlb_size); -} - -static int __init arm64_dma_init(void) -{ - int ret = 0; - - ret |= swiotlb_late_init(); - ret |= atomic_pool_init(); + ret = atomic_pool_init(); return ret; } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 71145f952070..ae85da6307bb 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -33,6 +33,7 @@ #include <linux/dma-mapping.h> #include <linux/dma-contiguous.h> #include <linux/efi.h> +#include <linux/swiotlb.h> #include <asm/fixmap.h> #include <asm/memory.h> @@ -45,6 +46,7 @@ #include "mm.h" phys_addr_t memstart_addr __read_mostly = 0; +phys_addr_t arm64_dma_phys_limit __read_mostly; #ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) @@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) { - max_dma = PFN_DOWN(max_zone_dma_phys()); + max_dma = PFN_DOWN(arm64_dma_phys_limit); zone_size[ZONE_DMA] = max_dma - min; } zone_size[ZONE_NORMAL] = max - max_dma; @@ -156,8 +158,6 @@ early_param("mem", early_mem); void __init arm64_memblock_init(void) { - phys_addr_t dma_phys_limit = 0; - memblock_enforce_memory_limit(memory_limit); /* @@ -174,8 +174,10 @@ void __init arm64_memblock_init(void) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) - dma_phys_limit = max_zone_dma_phys(); - dma_contiguous_reserve(dma_phys_limit); + arm64_dma_phys_limit = max_zone_dma_phys(); + else + arm64_dma_phys_limit = PHYS_MASK + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); memblock_dump_all(); @@ -276,6 +278,8 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { + swiotlb_init(1); + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); #ifndef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 245b2ee213c9..a46f7cf3e1ea 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -26,7 +26,7 @@ typedef struct { * For historical reasons (Data Segment Register?), these macros are misnamed. */ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space) +#define segment_eq(a, b) ((a).is_user_space == (b).is_user_space) #define USER_ADDR_LIMIT 0x80000000 @@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * * Returns zero on success, or -EFAULT on error. 
*/ -#define put_user(x,ptr) \ - __put_user_check((x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) /* * get_user: - Get a simple variable from user space. @@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) /* * __put_user: - Write a simple value into user space, with less checking. @@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((x), (ptr), sizeof(*(ptr))) /* * __get_user: - Get a simple variable from user space, with less checking. @@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) extern int __get_user_bad(void); extern int __put_user_bad(void); @@ -191,7 +191,7 @@ extern int __put_user_bad(void); default: __gu_err = __get_user_bad(); break; \ } \ \ - x = (typeof(*(ptr)))__gu_val; \ + x = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -222,7 +222,7 @@ extern int __put_user_bad(void); } else { \ __gu_err = -EFAULT; \ } \ - x = (typeof(*(ptr)))__gu_val; \ + x = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -278,7 +278,7 @@ extern int __put_user_bad(void); __pu_err); \ break; \ case 8: \ - __put_user_asm("d", __pu_addr, __pu_val, \ + __put_user_asm("d", __pu_addr, __pu_val, \ __pu_err); \ break; \ default: \ diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index cc92cdb9994c..1d8b147282cf 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = { .nr_channels = 3, .block_size = 4095U, .nr_masters = 2, - .data_width = { 2, 2, 0, 0 }, + .data_width = { 2, 2 }, }; static struct resource dw_dmac0_resource[] = { diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h deleted file mode 100644 index 8895a750c70c..000000000000 --- a/arch/blackfin/include/asm/bfin_rotary.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * board initialization should put one of these structures into platform_data - * and place the bfin-rotary onto platform_bus named "bfin-rotary". - * - * Copyright 2008-2010 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. 
- */ - -#ifndef _BFIN_ROTARY_H -#define _BFIN_ROTARY_H - -/* mode bitmasks */ -#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */ -#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */ -#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */ -#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */ - -#define ROT_DEBE DEBE /* Debounce Enable */ - -#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */ -#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */ -#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */ - -struct bfin_rotary_platform_data { - /* set rotary UP KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_up_key; - /* set rotary DOWN KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_down_key; - /* set rotary BUTTON KEY_### or BTN_### */ - unsigned int rotary_button_key; - /* set rotary Relative Axis REL_### in case you prefer - * bfin-rotary to send EV_REL otherwise set 0 - */ - unsigned int rotary_rel_code; - unsigned short debounce; /* 0..17 */ - unsigned short mode; - unsigned short pm_wakeup; -}; - -/* CNT_CONFIG bitmasks */ -#define CNTE (1 << 0) /* Counter Enable */ -#define DEBE (1 << 1) /* Debounce Enable */ -#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */ -#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */ -#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */ -#define CNTMODE_SHIFT 8 -#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */ -#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */ -#define BNDMODE_SHIFT 12 -#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */ -#define INPDIS (1 << 15) /* CUG and CDG Input Disable */ - -#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */ -#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */ -#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */ -#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */ -#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */ - -#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */ -#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */ -#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */ -#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */ - -/* CNT_IMASK bitmasks */ -#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */ -#define UCIE (1 << 1) /* Up count Interrupt Enable */ -#define DCIE (1 << 2) /* Down count Interrupt Enable */ -#define MINCIE (1 << 3) /* Min Count Interrupt Enable */ -#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */ -#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */ -#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */ -#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */ -#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */ -#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */ -#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */ - -/* CNT_STATUS bitmasks */ -#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */ -#define UCII (1 << 1) /* Up count Interrupt Identifier */ -#define DCII (1 << 2) /* Down count Interrupt Identifier */ -#define MINCII (1 << 3) /* Min Count Interrupt Identifier */ -#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */ -#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt 
Identifier */ -#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */ -#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */ -#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */ -#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */ -#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */ - -/* CNT_COMMAND bitmasks */ -#define W1LCNT 0xf /* Load Counter Register */ -#define W1LMIN 0xf0 /* Load Min Register */ -#define W1LMAX 0xf00 /* Load Max Register */ -#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */ - -#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */ -#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */ -#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */ - -#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */ -#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */ -#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */ - -#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */ -#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */ -#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */ - -/* CNT_DEBOUNCE bitmasks */ -#define DPRESCALE 0xf /* Load Counter Register */ - -#endif diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 57701c3b8a59..90612a7f2cf3 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -68,11 +68,11 @@ struct exception_table_entry { * use the right size if we just have the right pointer type. 
*/ -#define put_user(x,p) \ +#define put_user(x, p) \ ({ \ int _err = 0; \ typeof(*(p)) _x = (x); \ - typeof(*(p)) __user *_p = (p); \ + typeof(*(p)) __user *_p = (p); \ if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ _err = -EFAULT; \ } \ @@ -89,10 +89,10 @@ struct exception_table_entry { break; \ case 8: { \ long _xl, _xh; \ - _xl = ((long *)&_x)[0]; \ - _xh = ((long *)&_x)[1]; \ - __put_user_asm(_xl, ((long __user *)_p)+0, ); \ - __put_user_asm(_xh, ((long __user *)_p)+1, ); \ + _xl = ((__force long *)&_x)[0]; \ + _xh = ((__force long *)&_x)[1]; \ + __put_user_asm(_xl, ((__force long __user *)_p)+0, );\ + __put_user_asm(_xh, ((__force long __user *)_p)+1, );\ } break; \ default: \ _err = __put_user_bad(); \ @@ -102,7 +102,7 @@ struct exception_table_entry { _err; \ }) -#define __put_user(x,p) put_user(x,p) +#define __put_user(x, p) put_user(x, p) static inline int bad_user_access_length(void) { panic("bad_user_access_length"); @@ -121,10 +121,10 @@ static inline int bad_user_access_length(void) #define __ptr(x) ((unsigned long __force *)(x)) -#define __put_user_asm(x,p,bhw) \ +#define __put_user_asm(x, p, bhw) \ __asm__ (#bhw"[%1] = %0;\n\t" \ : /* no outputs */ \ - :"d" (x),"a" (__ptr(p)) : "memory") + :"d" (x), "a" (__ptr(p)) : "memory") #define get_user(x, ptr) \ ({ \ @@ -136,10 +136,10 @@ static inline int bad_user_access_length(void) BUILD_BUG_ON(ptr_size >= 8); \ switch (ptr_size) { \ case 1: \ - __get_user_asm(_val, _p, B,(Z)); \ + __get_user_asm(_val, _p, B, (Z)); \ break; \ case 2: \ - __get_user_asm(_val, _p, W,(Z)); \ + __get_user_asm(_val, _p, W, (Z)); \ break; \ case 4: \ __get_user_asm(_val, _p, , ); \ @@ -147,11 +147,11 @@ static inline int bad_user_access_length(void) } \ } else \ _err = -EFAULT; \ - x = (typeof(*(ptr)))_val; \ + x = (__force typeof(*(ptr)))_val; \ _err; \ }) -#define __get_user(x,p) get_user(x,p) +#define __get_user(x, p) get_user(x, p) #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) @@ -168,10 +168,10 @@ static inline int bad_user_access_length(void) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\ +#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\ return retval; }) -#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ +#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\ return retval; }) static inline unsigned long __must_check diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index 9501bd8d9cd1..68f2a8a806ea 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c @@ -666,7 +666,14 @@ static struct platform_device bfin_sport1_uart_device = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include <asm/bfin_rotary.h> +#include <linux/platform_data/bfin_rotary.h> + +static const u16 per_cnt[] = { + P_CNT_CUD, + P_CNT_CDG, + P_CNT_CZM, + 0 +}; static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ @@ -676,10 +683,16 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, .pm_wakeup = 1, + .pin_list = per_cnt, }; static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff 
--git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index d64f565dc2a0..d4219e8e5ab8 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -1092,7 +1092,14 @@ static struct platform_device bfin_device_gpiokeys = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include <asm/bfin_rotary.h> +#include <linux/platform_data/bfin_rotary.h> + +static const u16 per_cnt[] = { + P_CNT_CUD, + P_CNT_CDG, + P_CNT_CZM, + 0 +}; static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ @@ -1102,10 +1109,16 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, .pm_wakeup = 1, + .pin_list = per_cnt, }; static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 1fe7ff286619..4204b9842532 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include <asm/bfin_rotary.h> +#include <linux/platform_data/bfin_rotary.h> static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ @@ -173,6 +173,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index e2c0b024ce88..7f9fc272ec30 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include <asm/bfin_rotary.h> +#include <linux/platform_data/bfin_rotary.h> static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ @@ -88,6 +88,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 93bcf2abd1a1..07d7a7ef8bd5 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -123,12 +123,14 @@ extern unsigned long empty_zero_page; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PTRS_PER_PGD 64 +#define __PAGETABLE_PUD_FOLDED #define PUD_SHIFT 26 #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE - 1)) #define PUE_SIZE 256 +#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 26 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h index a2320a4a0042..4377c89a57f5 100644 --- a/arch/frv/include/asm/segment.h +++ b/arch/frv/include/asm/segment.h @@ -31,7 +31,7 @@ typedef struct { #define get_ds() (KERNEL_DS) #define get_fs() (__current_thread_info->addr_limit) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) 
((a).seg == (b).seg) #define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS) #define get_addr_limit() (get_fs().seg) diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 103bedc59644..4f3fb6ccbf21 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -169,10 +169,11 @@ do { \ (err) = ia64_getreg(_IA64_REG_R8); \ (val) = ia64_getreg(_IA64_REG_R9); \ } while (0) -# define __put_user_size(val, addr, n, err) \ -do { \ - __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \ - (err) = ia64_getreg(_IA64_REG_R8); \ +# define __put_user_size(val, addr, n, err) \ +do { \ + __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \ + (__force unsigned long) (val)); \ + (err) = ia64_getreg(_IA64_REG_R8); \ } while (0) #endif /* !ASM_SUPPORTED */ @@ -197,7 +198,7 @@ extern void __get_user_unknown (void); case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ default: __get_user_unknown(); break; \ } \ - (x) = (__typeof__(*(__gu_ptr))) __gu_val; \ + (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \ __gu_err; \ }) diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h index 8fd8ee70266a..421e6ba3a173 100644 --- a/arch/m32r/include/asm/pgtable-2level.h +++ b/arch/m32r/include/asm/pgtable-2level.h @@ -13,6 +13,7 @@ * the M32R is two-level, so we don't really have any * PMD directory physically. */ +#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 22 #define PTRS_PER_PMD 1 diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 84fe7ba53035..71adff209405 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s) #endif /* not CONFIG_MMU */ -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __addr_ok(addr) \ ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) @@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s) * * This needs 33-bit arithmetic. We have a carry... */ -#define __range_ok(addr,size) ({ \ +#define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm ( \ @@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s) * this function, memory access functions may still return -EFAULT. */ #ifdef CONFIG_MMU -#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) +#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) #else static inline int access_ok(int type, const void *addr, unsigned long size) { @@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) /** * put_user: - Write a simple value into user space. @@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs); * * Returns zero on success, or -EFAULT on error. */ -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) /** * __get_user: - Get a simple variable from user space, with less checking. @@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. 
*/ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err = 0; \ unsigned long __gu_val; \ might_fault(); \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"ub"); break; \ - case 2: __get_user_asm(x,ptr,retval,"uh"); break; \ - case 4: __get_user_asm(x,ptr,retval,""); break; \ + case 1: __get_user_asm(x, ptr, retval, "ub"); break; \ + case 2: __get_user_asm(x, ptr, retval, "uh"); break; \ + case 4: __get_user_asm(x, ptr, retval, ""); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) @@ -288,26 +288,26 @@ do { \ * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ might_fault(); \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ +#define __put_user_check(x, ptr, size) \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ }) @@ -366,15 +366,15 @@ do { \ extern void __put_user_bad(void); -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b"); break; \ - case 2: __put_user_asm(x,ptr,retval,"h"); break; \ - case 4: __put_user_asm(x,ptr,retval,""); break; \ - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ + case 1: __put_user_asm(x, ptr, retval, "b"); break; \ + case 2: __put_user_asm(x, ptr, retval, "h"); break; \ + case 4: __put_user_asm(x, ptr, retval, ""); break; \ + case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\ default: __put_user_bad(); \ } \ } while (0) @@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; }; /* Generic arbitrary sized copy. */ /* Return the number of bytes NOT copied. 
*/ -#define __copy_user(to,from,size) \ +#define __copy_user(to, from, size) \ do { \ unsigned long __dst, __src, __c; \ __asm__ __volatile__ ( \ @@ -478,7 +478,7 @@ do { \ : "r14", "memory"); \ } while (0) -#define __copy_user_zeroing(to,from,size) \ +#define __copy_user_zeroing(to, from, size) \ do { \ unsigned long __dst, __src, __c; \ __asm__ __volatile__ ( \ @@ -548,14 +548,14 @@ do { \ static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void __user *from, unsigned long n) { - __copy_user_zeroing(to,from,n); + __copy_user_zeroing(to, from, n); return n; } static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, const void *from, unsigned long n) { - __copy_user(to,from,n); + __copy_user(to, from, n); return n; } @@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * Returns number of bytes that could not be copied. * On success, this will be zero. */ -#define __copy_to_user(to,from,n) \ - __generic_copy_to_user_nocheck((to),(from),(n)) +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user @@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * Returns number of bytes that could not be copied. * On success, this will be zero. */ -#define copy_to_user(to,from,n) \ +#define copy_to_user(to, from, n) \ ({ \ might_fault(); \ - __generic_copy_to_user((to),(from),(n)); \ + __generic_copy_to_user((to), (from), (n)); \ }) /** @@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. */ -#define __copy_from_user(to,from,n) \ - __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) /** * copy_from_user: - Copy a block of data from user space. @@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. 
*/ -#define copy_from_user(to,from,n) \ +#define copy_from_user(to, from, n) \ ({ \ might_fault(); \ - __generic_copy_from_user((to),(from),(n)); \ + __generic_copy_from_user((to), (from), (n)); \ }) long __must_check strncpy_from_user(char *dst, const char __user *src, diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 28a145bfbb71..35ed4a9981ae 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -54,10 +54,12 @@ */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0fa80e97ed2d..98216b8111f0 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 15901db435b9..d228601b3afc 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -128,25 +128,25 @@ asm volatile ("\n" \ #define put_user(x, ptr) __put_user(x, ptr) -#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ - type __gu_val; \ - asm volatile ("\n" \ - "1: "MOVES"."#bwl" %2,%1\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: move.l %3,%0\n" \ - " sub.l %1,%1\n" \ - " jra 2b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .previous" \ - : "+d" (res), "=&" #reg (__gu_val) \ - : "m" (*(ptr)), "i" (err)); \ - (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ +#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ + type __gu_val; \ + asm volatile ("\n" \ + "1: "MOVES"."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " sub.l %1,%1\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .previous" \ + : "+d" (res), "=&" #reg (__gu_val) \ + : "m" (*(ptr)), "i" (err)); \ + (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ }) #define __get_user(x, ptr) \ @@ -188,7 +188,7 @@ asm volatile ("\n" \ "+a" (__gu_ptr) \ : "i" (-EFAULT) \ : "memory"); \ - (x) = (typeof(*(ptr)))__gu_val; \ + (x) = (__force typeof(*(ptr)))__gu_val; \ break; \ } */ \ default: \ diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 881071c07942..13272fd5a5ba 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -149,8 +149,8 @@ extern void exit_thread(void); unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) -#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 0748b0a97986..8282cbce7e39 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ 
-107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr); extern long __put_user_asm_d(unsigned int x, void __user *addr); extern long __put_user_asm_l(unsigned long long x, void __user *addr); -#define __put_user_size(x, ptr, size, retval) \ -do { \ - retval = 0; \ - switch (size) { \ +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ case 1: \ - retval = __put_user_asm_b((unsigned int)x, ptr); break; \ + retval = __put_user_asm_b((__force unsigned int)x, ptr);\ + break; \ case 2: \ - retval = __put_user_asm_w((unsigned int)x, ptr); break; \ + retval = __put_user_asm_w((__force unsigned int)x, ptr);\ + break; \ case 4: \ - retval = __put_user_asm_d((unsigned int)x, ptr); break; \ + retval = __put_user_asm_d((__force unsigned int)x, ptr);\ + break; \ case 8: \ - retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ + retval = __put_user_asm_l((__force unsigned long long)x,\ + ptr); \ + break; \ default: \ __put_user_bad(); \ } \ @@ -135,7 +140,7 @@ extern long __get_user_bad(void); ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -145,7 +150,7 @@ extern long __get_user_bad(void); const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ if (access_ok(VERIFY_READ, __gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 843713c05b79..c7a16904cd03 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -54,6 +54,7 @@ config MIPS select CPU_PM if CPU_IDLE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_BINFMT_ELF_STATE + select SYSCTL_EXCEPTION_TRACE menu "Machine selection" @@ -376,8 +377,10 @@ config MIPS_MALTA select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_CPU_MIPS32_R3_5 + select SYS_HAS_CPU_MIPS32_R6 select SYS_HAS_CPU_MIPS64_R1 select SYS_HAS_CPU_MIPS64_R2 + select SYS_HAS_CPU_MIPS64_R6 select SYS_HAS_CPU_NEVADA select SYS_HAS_CPU_RM7000 select SYS_SUPPORTS_32BIT_KERNEL @@ -1033,6 +1036,9 @@ config MIPS_MACHINE config NO_IOPORT_MAP def_bool n +config GENERIC_CSUM + bool + config GENERIC_ISA_DMA bool select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n @@ -1146,6 +1152,9 @@ config SOC_PNX8335 bool select SOC_PNX833X +config MIPS_SPRAM + bool + config SWAP_IO_SPACE bool @@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2 specific type of processor in your system, choose those that one otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system. +config CPU_MIPS32_R6 + bool "MIPS32 Release 6 (EXPERIMENTAL)" + depends on SYS_HAS_CPU_MIPS32_R6 + select CPU_HAS_PREFETCH + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_MSA + select GENERIC_CSUM + select HAVE_KVM + select MIPS_O32_FP64_SUPPORT + help + Choose this option to build a kernel for release 6 or later of the + MIPS32 architecture. New MIPS processors, starting with the Warrior + family, are based on a MIPS32r6 processor. If you own an older + processor, you probably need to select MIPS32r1 or MIPS32r2 instead. + config CPU_MIPS64_R1 bool "MIPS64 Release 1" depends on SYS_HAS_CPU_MIPS64_R1 @@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2 specific type of processor in your system, choose those that one otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system. 
+config CPU_MIPS64_R6 + bool "MIPS64 Release 6 (EXPERIMENTAL)" + depends on SYS_HAS_CPU_MIPS64_R6 + select CPU_HAS_PREFETCH + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_64BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_MSA + select GENERIC_CSUM + help + Choose this option to build a kernel for release 6 or later of the + MIPS64 architecture. New MIPS processors, starting with the Warrior + family, are based on a MIPS64r6 processor. If you own an older + processor, you probably need to select MIPS64r1 or MIPS64r2 instead. + config CPU_R3000 bool "R3000" depends on SYS_HAS_CPU_R3000 @@ -1539,7 +1579,7 @@ endchoice config CPU_MIPS32_3_5_FEATURES bool "MIPS32 Release 3.5 Features" depends on SYS_HAS_CPU_MIPS32_R3_5 - depends on CPU_MIPS32_R2 + depends on CPU_MIPS32_R2 || CPU_MIPS32_R6 help Choose this option to build a kernel for release 2 or later of the MIPS32 architecture including features from the 3.5 release such as @@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2 config SYS_HAS_CPU_MIPS32_R3_5 bool +config SYS_HAS_CPU_MIPS32_R6 + bool + config SYS_HAS_CPU_MIPS64_R1 bool config SYS_HAS_CPU_MIPS64_R2 bool +config SYS_HAS_CPU_MIPS64_R6 + bool + config SYS_HAS_CPU_R3000 bool @@ -1764,11 +1810,11 @@ endmenu # config CPU_MIPS32 bool - default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 + default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6 config CPU_MIPS64 bool - default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 + default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6 # # These two indicate the revision of the architecture, either Release 1 or Release 2 @@ -1780,6 +1826,12 @@ config CPU_MIPSR1 config CPU_MIPSR2 bool default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON + select MIPS_SPRAM + +config CPU_MIPSR6 + bool + default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 + select MIPS_SPRAM config EVA bool @@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF default y depends on MIPS_MT_SMP +config MIPSR2_TO_R6_EMULATOR + bool "MIPS R2-to-R6 emulator" + depends on CPU_MIPSR6 && !SMP + default y + help + Choose this option if you want to run non-R6 MIPS userland code. + Even if you say 'Y' here, the emulator will still be disabled by + default. You can enable it using the 'mipsr2emul' kernel option. + The only reason this is a build-time option is to save ~14K from the + final kernel image. +comment "MIPS R2-to-R6 emulator is only available for UP kernels" + depends on SMP && CPU_MIPSR6 + config MIPS_VPE_LOADER bool "VPE loader support." depends on SYS_SUPPORTS_MULTITHREADING && MODULES @@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS here. config CPU_MICROMIPS - depends on 32BIT && SYS_SUPPORTS_MICROMIPS + depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6 bool "microMIPS" help When this option is enabled the kernel will be built using the diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 88a9f433f6fc..3a2b775e8458 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -122,17 +122,4 @@ config SPINLOCK_TEST help Add several files to the debugfs to test spinlock speed. -config FP32XX_HYBRID_FPRS - bool "Run FP32 & FPXX code with hybrid FPRs" - depends on MIPS_O32_FP64_SUPPORT - help - The hybrid FPR scheme is normally used only when a program needs to - execute a mix of FP32 & FP64A code, since the trapping & emulation - that it entails is expensive. When enabled, this option will lead - to the kernel running programs which use the FP32 & FPXX FP ABIs - using the hybrid FPR scheme, which can be useful for debugging - purposes. 
- - If unsure, say N. - endmenu diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 2563a088d3b8..8f57fc72d62c 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) -# For smartmips configurations, there are hundreds of warnings due to ISA overrides -# in assembly and header files. smartmips is only supported for MIPS32r1 onwards -# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or -# similar directives in the kernel will spam the build logs with the following warnings: -# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater -# or -# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension -# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has -# been fixed properly. -cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn -cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) - cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ -fno-omit-frame-pointer - -ifeq ($(CONFIG_CPU_HAS_MSA),y) -toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa) -cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA -endif - # # CPU-dependent compiler/assembler options for optimization. # @@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS -Wa,-mips32 -Wa,--trap cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ -Wa,-mips32r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ -Wa,-mips64 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ -Wa,-mips64r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ -Wa,--trap @@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon endif cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap +# +# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a +# as MIPS64 R1; older versions as just R1. This leaves the possibility open +# that GCC might generate R2 code for -march=loongson3a which then is rejected +# by GAS. The cc-option can't probe for this behaviour so -march=loongson3a +# can't easily be used safely within the kbuild framework. +# +cflags-$(CONFIG_CPU_LOONGSON3) += \ + $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ + -Wa,-mips64r2 -Wa,--trap cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) @@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds endif endif +# For smartmips configurations, there are hundreds of warnings due to ISA overrides +# in assembly and header files. 
smartmips is only supported for MIPS32r1 onwards +# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or +# similar directives in the kernel will spam the build logs with the following warnings: +# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater +# or +# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension +# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has +# been fixed properly. +mips-cflags := "$(cflags-y)" +cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn +cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips) +ifeq ($(CONFIG_CPU_HAS_MSA),y) +toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa) +cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA +endif + # # Firmware support # @@ -287,7 +298,11 @@ boot-y += vmlinux.ecoff boot-y += vmlinux.srec ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0) boot-y += uImage +boot-y += uImage.bin +boot-y += uImage.bz2 boot-y += uImage.gz +boot-y += uImage.lzma +boot-y += uImage.lzo endif # compressed boot image targets (arch/mips/boot/compressed/) @@ -386,7 +401,11 @@ define archhelp echo ' vmlinuz.bin - Raw binary zboot image' echo ' vmlinuz.srec - SREC zboot image' echo ' uImage - U-Boot image' + echo ' uImage.bin - U-Boot image (uncompressed)' + echo ' uImage.bz2 - U-Boot image (bz2)' echo ' uImage.gz - U-Boot image (gzip)' + echo ' uImage.lzma - U-Boot image (lzma)' + echo ' uImage.lzo - U-Boot image (lzo)' echo ' dtbs - Device-tree blobs for enabled boards' echo echo ' These will be default as appropriate for a configured platform.' diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 48a9dfc55b51..6a98d2cb402c 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw, t = 396000000; else { t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; + if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300) + t &= 0x3f; t *= parent_rate; } return t; } +void __init alchemy_set_lpj(void) +{ + preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE); + preset_lpj /= 2 * HZ; +} + static struct clk_ops alchemy_clkops_cpu = { .recalc_rate = alchemy_clk_cpu_recalc, }; @@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct) /* lrclk: external synchronous static bus clock ***********************/ -static struct clk __init *alchemy_clk_setup_lrclk(const char *pn) +static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t) { - /* MEM_STCFG0[15:13] = divisor. + /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5, + * otherwise lrclk=pclk/4. + * All other variants: MEM_STCFG0[15:13] = divisor. * L/RCLK = periph_clk / (divisor + 1) * On Au1000, Au1500, Au1100 it's called LCLK, * on later models it's called RCLK, but it's the same thing. 
*/ struct clk *c; - unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13; + unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0); - v = (v & 7) + 1; + switch (t) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + v = 4 + ((v >> 11) & 1); + break; + default: /* all other models */ + v = ((v >> 13) & 7) + 1; + } c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, pn, 0, 1, v); if (!IS_ERR(c)) @@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, } static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, } static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, } static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void) ERRCK(c) /* L/RCLK: external static bus clock for synchronous mode */ - c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK); + c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype); ERRCK(c) /* Frequency dividers 0-5 */ diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 4e72daf12c32..2902138b3e0f 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -34,10 +34,12 @@ #include <au1000.h> extern void __init board_setup(void); -extern void set_cpuspec(void); +extern void __init alchemy_set_lpj(void); void __init plat_mem_setup(void) { + alchemy_set_lpj(); + if (au1xxx_cpu_needs_config_od()) /* Various early Au1xx0 errata corrected by this */ set_c0_config(1 << 19); /* Set Config[OD] */ diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c index 0fb5134fb832..fd94fe849af6 100644 --- a/arch/mips/bcm3384/irq.c +++ b/arch/mips/bcm3384/irq.c @@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node, static struct of_device_id of_irq_ids[] __initdata = { { .compatible = "mti,cpu-interrupt-controller", - .data = mips_cpu_intc_init }, + .data = mips_cpu_irq_of_init }, { .compatible = "brcm,bcm3384-intc", .data = intc_of_init }, {}, diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 1466c0026093..acb1988f354e 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -23,6 +23,12 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections)) hostprogs-y := elf2ecoff +suffix-y := bin +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_LZMA) := lzma +suffix-$(CONFIG_KERNEL_LZO) := lzo + targets := vmlinux.ecoff quiet_cmd_ecoff = ECOFF $@ cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag) @@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS) UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) +# +# Compressed vmlinux images +# + +extra-y += vmlinux.bin.bz2 +extra-y += vmlinux.bin.gz +extra-y += vmlinux.bin.lzma +extra-y += vmlinux.bin.lzo + +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo) + +# +# Compressed u-boot images +# + +targets += uImage +targets += uImage.bin +targets += uImage.bz2 targets += uImage.gz +targets += uImage.lzma +targets += uImage.lzo + +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE + $(call if_changed,uimage,none) + +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE + $(call if_changed,uimage,bzip2) + $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(call if_changed,uimage,gzip) -targets += uImage -$(obj)/uImage: $(obj)/uImage.gz FORCE +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE + $(call if_changed,uimage,lzma) + +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE + $(call if_changed,uimage,lzo) + +$(obj)/uImage: $(obj)/uImage.$(suffix-y) @ln -sf $(notdir $<) $@ @echo ' Image $@ is ready' diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c index 2a4c52e27f41..266c8137e859 100644 --- a/arch/mips/boot/elf2ecoff.c +++ b/arch/mips/boot/elf2ecoff.c @@ -268,7 +268,6 @@ int main(int argc, char *argv[]) Elf32_Ehdr ex; Elf32_Phdr *ph; Elf32_Shdr *sh; - char *shstrtab; int i, pad; struct sect text, data, bss; struct filehdr efh; @@ -336,9 +335,6 @@ int main(int argc, char *argv[]) "sh"); if (must_convert_endian) convert_elf_shdrs(sh, ex.e_shnum); - /* Read in the section string table. */ - shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset, - sh[ex.e_shstrndx].sh_size, "shstrtab"); /* Figure out if we can cram the program header into an ECOFF header... Basically, we can't handle anything but loadable diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c index b752c4ed0b79..1882e6475dd0 100644 --- a/arch/mips/cavium-octeon/csrc-octeon.c +++ b/arch/mips/cavium-octeon/csrc-octeon.c @@ -18,7 +18,7 @@ #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-ipd-defs.h> #include <asm/octeon/cvmx-mio-defs.h> - +#include <asm/octeon/cvmx-rst-defs.h> static u64 f; static u64 rdiv; @@ -39,11 +39,20 @@ void __init octeon_setup_delays(void) if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { union cvmx_mio_rst_boot rst_boot; + rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); rdiv = rst_boot.s.c_mul; /* CPU clock */ sdiv = rst_boot.s.pnr_mul; /* I/O clock */ f = (0x8000000000000000ull / sdiv) * 2; + } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) { + union cvmx_rst_boot rst_boot; + + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); + rdiv = rst_boot.s.c_mul; /* CPU clock */ + sdiv = rst_boot.s.pnr_mul; /* I/O clock */ + f = (0x8000000000000000ull / sdiv) * 2; } + } /* diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 3778655c4a37..7d8987818ccf 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void) continue; /* These addresses map low for PCI. */ - if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX)) + if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2()) continue; addr_size += e->size; @@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void) #endif #ifdef CONFIG_USB_OCTEON_OHCI /* OCTEON II ohci is only 32-bit. 
*/ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul) + if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul) swiotlbsize = 64 * (1<<20); #endif swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 5dfef84b9576..9eb0feef4417 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo break; } /* Most boards except NIC10e use a 12MHz crystal */ - if (OCTEON_IS_MODEL(OCTEON_FAM_2)) + if (OCTEON_IS_OCTEON2()) return USB_CLOCK_TYPE_CRYSTAL_12; return USB_CLOCK_TYPE_REF_48; } diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 2bc4aa95944e..10f762557b92 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -3,12 +3,14 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2004-2012 Cavium, Inc. + * Copyright (C) 2004-2014 Cavium, Inc. */ +#include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/bitops.h> +#include <linux/of_irq.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/irq.h> @@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); +struct octeon_irq_ciu_domain_data { + int num_sum; /* number of sum registers (2 or 3). */ +}; + static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; -union octeon_ciu_chip_data { - void *p; - unsigned long l; - struct { - unsigned long line:6; - unsigned long bit:6; - unsigned long gpio_line:6; - } s; +struct octeon_ciu_chip_data { + union { + struct { /* only used for ciu3 */ + u64 ciu3_addr; + unsigned int intsn; + }; + struct { /* only used for ciu/ciu2 */ + u8 line; + u8 bit; + u8 gpio_line; + }; + }; + int current_cpu; /* Next CPU expected to take this irq */ }; struct octeon_core_chip_data { @@ -45,27 +56,40 @@ struct octeon_core_chip_data { static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; -static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, - struct irq_chip *chip, - irq_flow_handler_t handler) +static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, + struct irq_chip *chip, + irq_flow_handler_t handler) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return -ENOMEM; irq_set_chip_and_handler(irq, chip, handler); - cd.l = 0; - cd.s.line = line; - cd.s.bit = bit; - cd.s.gpio_line = gpio_line; + cd->line = line; + cd->bit = bit; + cd->gpio_line = gpio_line; - irq_set_chip_data(irq, cd.p); + irq_set_chip_data(irq, cd); octeon_irq_ciu_to_irq[line][bit] = irq; + return 0; } -static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, - int irq, int line, int bit) +static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) { - irq_domain_associate(domain, irq, line << 6 | bit); + struct irq_data *data = irq_get_irq_data(irq); + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); + + irq_set_chip_data(irq, NULL); + kfree(cd); +} + +static int octeon_irq_force_ciu_mapping(struct irq_domain 
*domain, + int irq, int line, int bit) +{ + return irq_domain_associate(domain, irq, line << 6 | bit); } static int octeon_coreid_for_cpu(int cpu) @@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data) #ifdef CONFIG_SMP int cpu; int weight = cpumask_weight(data->affinity); + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); if (weight > 1) { - cpu = smp_processor_id(); + cpu = cd->current_cpu; for (;;) { cpu = cpumask_next(cpu, data->affinity); if (cpu >= nr_cpu_ids) { @@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data) } else { cpu = smp_processor_id(); } + cd->current_cpu = cpu; return cpu; #else return smp_processor_id(); @@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data) int coreid = octeon_coreid_for_cpu(cpu); unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); } else { pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. 
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data) unsigned long flags; unsigned long *pen; int cpu; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); raw_spin_lock_irqsave(lock, flags); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. */ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data) unsigned long flags; unsigned long *pen; int cpu; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); raw_spin_lock_irqsave(lock, flags); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. */ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data) { u64 mask; int cpu = next_cpu_for_irq(data); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); /* * Called under the desc lock, so these should never get out * of sync. */ - if (cd.s.line == 0) { + if (cd->line == 0) { int index = octeon_coreid_for_cpu(cpu) * 2; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } /* + * Enable the irq in the sum2 registers. + */ +static void octeon_irq_ciu_enable_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); +} + +/* + * Disable the irq in the sum2 registers. 
+ */ +static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); +} + +static void octeon_irq_ciu_ack_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); +} + +static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) +{ + int cpu; + struct octeon_ciu_chip_data *cd; + u64 mask; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + for_each_online_cpu(cpu) { + int coreid = octeon_coreid_for_cpu(cpu); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); + } +} + +/* * Enable the irq on the current CPU for chips that * have the EN*_W1{S,C} registers. */ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } @@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } @@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) static void octeon_irq_ciu_ack(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); } else { @@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 
1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + clear_bit(cd->bit, + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + clear_bit(cd->bit, + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } @@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + set_bit(cd->bit, + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + set_bit(cd->bit, + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } @@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) static void octeon_irq_gpio_setup(struct irq_data *data) { union cvmx_gpio_bit_cfgx cfg; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; u32 t = irqd_get_trigger_type(data); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); cfg.u64 = 0; cfg.s.int_en = 1; @@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data) cfg.s.fil_cnt = 7; cfg.s.fil_sel = 3; - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); } static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) @@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cd = irq_data_get_irq_chip_data(data); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu_disable_all_v2(data); } static void octeon_irq_ciu_disable_gpio(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cd = irq_data_get_irq_chip_data(data); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu_disable_all(data); } static void octeon_irq_ciu_gpio_ack(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; u64 mask; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.gpio_line); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->gpio_line); cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); } -static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) +static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) { if (irq_get_trigger_type(irq) & 
IRQ_TYPE_EDGE_BOTH) handle_edge_irq(irq, desc); @@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; unsigned long *pen; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); /* * For non-v2 CIU, we will allow only single CPU affinity. @@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = 0; - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); } else { - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); } /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, */ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; if (!enable_one) return 0; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << cd.s.bit; + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); int index = octeon_coreid_for_cpu(cpu) * 2; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - set_bit(cd.s.bit, pen); + set_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { - clear_bit(cd.s.bit, pen); + clear_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } @@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, int index = octeon_coreid_for_cpu(cpu) * 2 + 1; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - set_bit(cd.s.bit, pen); + set_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } else { - clear_bit(cd.s.bit, pen); + clear_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } } return 0; } + +static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + int cpu; + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); + u64 mask; + struct octeon_ciu_chip_data *cd; + + if (!enable_one) + return 0; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; + + for_each_online_cpu(cpu) { + int index = octeon_coreid_for_cpu(cpu); + + if (cpumask_test_cpu(cpu, dest) && enable_one) { + enable_one = false; + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); + } else { + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); + } + } + return 0; +} #endif /* @@ -752,6 +871,18 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, + .irq_mask = octeon_irq_ciu_disable_local_v2, + .irq_unmask = octeon_irq_ciu_enable_v2, +#ifdef CONFIG_SMP + 
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_v2_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_v2, + .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_ack = octeon_irq_ciu_ack, .irq_mask = octeon_irq_ciu_disable_local_v2, .irq_unmask = octeon_irq_ciu_enable_v2, @@ -761,10 +892,50 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = { #endif }; +/* + * Newer octeon chips have support for lockless CIU operation. + */ +static struct irq_chip octeon_irq_chip_ciu_sum2 = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_sum2, + .irq_disable = octeon_irq_ciu_disable_all_sum2, + .irq_mask = octeon_irq_ciu_disable_local_sum2, + .irq_unmask = octeon_irq_ciu_enable_sum2, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_sum2, + .irq_disable = octeon_irq_ciu_disable_all_sum2, + .irq_ack = octeon_irq_ciu_ack_sum2, + .irq_mask = octeon_irq_ciu_disable_local_sum2, + .irq_unmask = octeon_irq_ciu_enable_sum2, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + static struct irq_chip octeon_irq_chip_ciu = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable, .irq_disable = octeon_irq_ciu_disable_all, + .irq_mask = octeon_irq_ciu_disable_local, + .irq_unmask = octeon_irq_ciu_enable, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable, + .irq_disable = octeon_irq_ciu_disable_all, .irq_ack = octeon_irq_ciu_ack, .irq_mask = octeon_irq_ciu_disable_local, .irq_unmask = octeon_irq_ciu_enable, @@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, unsigned int *out_type) { unsigned int ciu, bit; + struct octeon_irq_ciu_domain_data *dd = d->host_data; ciu = intspec[0]; bit = intspec[1]; - if (ciu > 1 || bit > 63) + if (ciu >= dd->num_sum || bit > 63) return -EINVAL; *out_hwirq = (ciu << 6) | bit; @@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, } static struct irq_chip *octeon_irq_ciu_chip; +static struct irq_chip *octeon_irq_ciu_chip_edge; static struct irq_chip *octeon_irq_gpio_chip; static bool octeon_irq_virq_in_range(unsigned int virq) @@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq) static int octeon_irq_ciu_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { + int rv; unsigned int line = hw >> 6; unsigned int bit = hw & 63; + struct octeon_irq_ciu_domain_data *dd = d->host_data; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; @@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d, if (line == 0 && bit >= 16 && bit <32) return 0; - if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) + if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; - if (octeon_irq_ciu_is_edge(line, bit)) - octeon_irq_set_ciu_mapping(virq, line, bit, 0, - octeon_irq_ciu_chip, - handle_edge_irq); - else - octeon_irq_set_ciu_mapping(virq, line, bit, 0, - octeon_irq_ciu_chip, - handle_level_irq); - - return 0; + if (line == 2) { + if (octeon_irq_ciu_is_edge(line, bit)) + rv = 
octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu_sum2_edge, + handle_edge_irq); + else + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu_sum2, + handle_level_irq); + } else { + if (octeon_irq_ciu_is_edge(line, bit)) + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + octeon_irq_ciu_chip_edge, + handle_edge_irq); + else + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + octeon_irq_ciu_chip, + handle_level_irq); + } + return rv; } -static int octeon_irq_gpio_map_common(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw, - int line_limit, struct irq_chip *chip) +static int octeon_irq_gpio_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) { struct octeon_irq_gpio_domain_data *gpiod = d->host_data; unsigned int line, bit; + int r; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; line = (hw + gpiod->base_hwirq) >> 6; bit = (hw + gpiod->base_hwirq) & 63; - if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) + if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || + octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; - octeon_irq_set_ciu_mapping(virq, line, bit, hw, - chip, octeon_irq_handle_gpio); - return 0; -} - -static int octeon_irq_gpio_map(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw) -{ - return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); + r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, + octeon_irq_gpio_chip, octeon_irq_handle_trigger); + return r; } static struct irq_domain_ops octeon_irq_domain_ciu_ops = { .map = octeon_irq_ciu_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_ciu_xlat, }; static struct irq_domain_ops octeon_irq_domain_gpio_ops = { .map = octeon_irq_gpio_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_gpio_xlat, }; @@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void) } } +static void octeon_irq_ip4_ciu(void) +{ + int coreid = cvmx_get_core_num(); + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); + u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); + + ciu_sum &= ciu_en; + if (likely(ciu_sum)) { + int bit = fls64(ciu_sum) - 1; + int irq = octeon_irq_ciu_to_irq[2][bit]; + + if (likely(irq)) + do_IRQ(irq); + else + spurious_interrupt(); + } else { + spurious_interrupt(); + } +} + static bool octeon_irq_use_ip4; static void octeon_irq_local_enable_ip4(void *arg) @@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void) /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); - clear_c0_status(STATUSF_IP4); + if (octeon_irq_use_ip4) + set_c0_status(STATUSF_IP4); + else + clear_c0_status(STATUSF_IP4); } static void octeon_irq_setup_secondary_ciu2(void) @@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void) clear_c0_status(STATUSF_IP4); } -static void __init octeon_irq_init_ciu(void) +static int __init octeon_irq_init_ciu( + struct device_node *ciu_node, struct device_node *parent) { - unsigned int i; + unsigned int i, r; struct irq_chip *chip; + struct irq_chip *chip_edge; struct irq_chip *chip_mbox; struct irq_chip *chip_wd; - struct device_node *gpio_node; - struct device_node *ciu_node; struct irq_domain *ciu_domain = NULL; + struct octeon_irq_ciu_domain_data *dd; + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; octeon_irq_ip2 = octeon_irq_ip2_ciu; octeon_irq_ip3 = octeon_irq_ip3_ciu; + if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) + 
&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) { + octeon_irq_ip4 = octeon_irq_ip4_ciu; + dd->num_sum = 3; + octeon_irq_use_ip4 = true; + } else { + octeon_irq_ip4 = octeon_irq_ip4_mask; + dd->num_sum = 2; + octeon_irq_use_ip4 = false; + } if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || - OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { chip = &octeon_irq_chip_ciu_v2; + chip_edge = &octeon_irq_chip_ciu_v2_edge; chip_mbox = &octeon_irq_chip_ciu_mbox_v2; chip_wd = &octeon_irq_chip_ciu_wd_v2; octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; } else { chip = &octeon_irq_chip_ciu; + chip_edge = &octeon_irq_chip_ciu_edge; chip_mbox = &octeon_irq_chip_ciu_mbox; chip_wd = &octeon_irq_chip_ciu_wd; octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; } octeon_irq_ciu_chip = chip; - octeon_irq_ip4 = octeon_irq_ip4_mask; + octeon_irq_ciu_chip_edge = chip_edge; /* Mips internal */ octeon_irq_init_core(); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); - if (gpio_node) { - struct octeon_irq_gpio_domain_data *gpiod; - - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); - if (gpiod) { - /* gpio domain host_data is the base hwirq number. */ - gpiod->base_hwirq = 16; - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); - of_node_put(gpio_node); - } else - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); - } else - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); - - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); - if (ciu_node) { - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); - irq_set_default_host(ciu_domain); - of_node_put(ciu_node); - } else - panic("Cannot find device node for cavium,octeon-3860-ciu."); + ciu_domain = irq_domain_add_tree( + ciu_node, &octeon_irq_domain_ciu_ops, dd); + irq_set_default_host(ciu_domain); /* CIU_0 */ - for (i = 0; i < 16; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + for (i = 0; i < 16; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + if (r) + goto err; + } + + r = octeon_irq_set_ciu_mapping( + OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); + if (r) + goto err; + r = octeon_irq_set_ciu_mapping( + OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); + if (r) + goto err; + + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); + if (r) + goto err; + } + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + if (r) + goto err; + } - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); + if (r) + goto err; - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); + if (r) + goto err; - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 
+ for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + if (r) + goto err; + } + + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); + if (r) + goto err; - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); + if (r) + goto err; /* CIU_1 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); + for (i = 0; i < 16; i++) { + r = octeon_irq_set_ciu_mapping( + i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, + handle_level_irq); + if (r) + goto err; + } - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); + if (r) + goto err; /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); - clear_c0_status(STATUSF_IP4); + if (octeon_irq_use_ip4) + set_c0_status(STATUSF_IP4); + else + clear_c0_status(STATUSF_IP4); + + return 0; +err: + return r; } +static int __init octeon_irq_init_gpio( + struct device_node *gpio_node, struct device_node *parent) +{ + struct octeon_irq_gpio_domain_data *gpiod; + u32 interrupt_cells; + unsigned int base_hwirq; + int r; + + r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); + if (r) + return r; + + if (interrupt_cells == 1) { + u32 v; + + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + base_hwirq = v; + } else if (interrupt_cells == 2) { + u32 v0, v1; + + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + base_hwirq = (v0 << 6) | v1; + } else { + pr_warn("Bad \"#interrupt-cells\" property: %u\n", + interrupt_cells); + return -EINVAL; + } + + gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); + if (gpiod) { + /* gpio domain host_data is the base hwirq number. */ + gpiod->base_hwirq = base_hwirq; + irq_domain_add_linear( + gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); + } else { + pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); + return -ENOMEM; + } + + return 0; +} /* * Watchdog interrupts are special. They are associated with a single * core, so we hardwire the affinity to that core. 
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data) u64 mask; u64 en_addr; int coreid = data->irq - OCTEON_IRQ_WDOG0; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data) u64 en_addr; int cpu = next_cpu_for_irq(data); int coreid = octeon_coreid_for_cpu(cpu); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( + octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } } @@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); + u64 
en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( + octeon_coreid_for_cpu(cpu)); cvmx_write_csr(en_addr, mask); } } @@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( + octeon_coreid_for_cpu(cpu)); cvmx_write_csr(en_addr, mask); } } @@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; if (!enable_one) return 0; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << cd.s.bit; + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; for_each_online_cpu(cpu) { u64 en_addr; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( + octeon_coreid_for_cpu(cpu)) + + (0x1000ull * cd->line); } else { - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( + octeon_coreid_for_cpu(cpu)) + + (0x1000ull * cd->line); } cvmx_write_csr(en_addr, mask); } @@ -1461,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) { - union octeon_ciu_chip_data cd; - cd.p = irq_data_get_irq_chip_data(data); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu2_disable_all(data); } @@ -1473,6 +1789,18 @@ static struct irq_chip octeon_irq_chip_ciu2 = { .name = "CIU2-E", .irq_enable = octeon_irq_ciu2_enable, .irq_disable = octeon_irq_ciu2_disable_all, + .irq_mask = octeon_irq_ciu2_disable_local, + .irq_unmask = octeon_irq_ciu2_enable, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu2_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu2_edge = { + .name = "CIU2-E", + .irq_enable = octeon_irq_ciu2_enable, + .irq_disable = octeon_irq_ciu2_disable_all, .irq_ack = octeon_irq_ciu2_ack, .irq_mask = octeon_irq_ciu2_disable_local, .irq_unmask = octeon_irq_ciu2_enable, @@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, if (octeon_irq_ciu2_is_edge(line, bit)) octeon_irq_set_ciu_mapping(virq, line, bit, 0, - &octeon_irq_chip_ciu2, + &octeon_irq_chip_ciu2_edge, handle_edge_irq); else octeon_irq_set_ciu_mapping(virq, line, bit, 0, @@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, return 0; } -static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw) -{ - return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); -} static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { .map = octeon_irq_ciu2_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_ciu2_xlat, }; -static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { - .map = octeon_irq_ciu2_gpio_map, - .xlate = octeon_irq_gpio_xlat, -}; - static void octeon_irq_ciu2(void) { int line; @@ -1674,16 +1993,16 @@ out: return; } -static void __init octeon_irq_init_ciu2(void) +static int __init 
octeon_irq_init_ciu2( + struct device_node *ciu_node, struct device_node *parent) { - unsigned int i; - struct device_node *gpio_node; - struct device_node *ciu_node; + unsigned int i, r; struct irq_domain *ciu_domain = NULL; octeon_irq_init_ciu2_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; + octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; octeon_irq_ip2 = octeon_irq_ciu2; octeon_irq_ip3 = octeon_irq_ciu2_mbox; octeon_irq_ip4 = octeon_irq_ip4_mask; @@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void) /* Mips internal */ octeon_irq_init_core(); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); - if (gpio_node) { - struct octeon_irq_gpio_domain_data *gpiod; - - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); - if (gpiod) { - /* gpio domain host_data is the base hwirq number. */ - gpiod->base_hwirq = 7 << 6; - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); - of_node_put(gpio_node); - } else - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); - } else - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); - - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); - if (ciu_node) { - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); - irq_set_default_host(ciu_domain); - of_node_put(ciu_node); - } else - panic("Cannot find device node for cavium,octeon-6880-ciu2."); + ciu_domain = irq_domain_add_tree( + ciu_node, &octeon_irq_domain_ciu2_ops, NULL); + irq_set_default_host(ciu_domain); /* CUI2 */ - for (i = 0; i < 64; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); + for (i = 0; i < 64; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); + if (r) + goto err; + } - for (i = 0; i < 32; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, - &octeon_irq_chip_ciu2_wd, handle_level_irq); + for (i = 0; i < 32; i++) { + r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, + &octeon_irq_chip_ciu2_wd, handle_level_irq); + if (r) + goto err; + } - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); + if (r) + goto err; + } - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); + if (r) + goto err; - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); + if (r) + goto err; + } - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); + if (r) + goto err; + } irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); @@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void) /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); clear_c0_status(STATUSF_IP4); + return 0; +err: + return r; +} + +struct octeon_irq_cib_host_data { + raw_spinlock_t lock; + u64 raw_reg; + u64 en_reg; + int max_bits; +}; + +struct octeon_irq_cib_chip_data { + struct 
octeon_irq_cib_host_data *host_data; + int bit; +}; + +static void octeon_irq_cib_enable(struct irq_data *data) +{ + unsigned long flags; + u64 en; + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); + struct octeon_irq_cib_host_data *host_data = cd->host_data; + + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en |= 1ull << cd->bit; + cvmx_write_csr(host_data->en_reg, en); + raw_spin_unlock_irqrestore(&host_data->lock, flags); +} + +static void octeon_irq_cib_disable(struct irq_data *data) +{ + unsigned long flags; + u64 en; + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); + struct octeon_irq_cib_host_data *host_data = cd->host_data; + + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en &= ~(1ull << cd->bit); + cvmx_write_csr(host_data->en_reg, en); + raw_spin_unlock_irqrestore(&host_data->lock, flags); +} + +static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) +{ + irqd_set_trigger_type(data, t); + return IRQ_SET_MASK_OK; +} + +static struct irq_chip octeon_irq_chip_cib = { + .name = "CIB", + .irq_enable = octeon_irq_cib_enable, + .irq_disable = octeon_irq_cib_disable, + .irq_mask = octeon_irq_cib_disable, + .irq_unmask = octeon_irq_cib_enable, + .irq_set_type = octeon_irq_cib_set_type, +}; + +static int octeon_irq_cib_xlat(struct irq_domain *d, + struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned int type = 0; + + if (intsize == 2) + type = intspec[1]; + + switch (type) { + case 0: /* unofficial value, but we might as well let it work. */ + case 4: /* official value for level triggering. */ + *out_type = IRQ_TYPE_LEVEL_HIGH; + break; + case 1: /* official value for edge triggering. */ + *out_type = IRQ_TYPE_EDGE_RISING; + break; + default: /* Nothing else is acceptable. */ + return -EINVAL; + } + + *out_hwirq = intspec[0]; + + return 0; +} + +static int octeon_irq_cib_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) +{ + struct octeon_irq_cib_host_data *host_data = d->host_data; + struct octeon_irq_cib_chip_data *cd; + + if (hw >= host_data->max_bits) { + pr_err("ERROR: %s mapping %u is too big!\n", + d->of_node->name, (unsigned)hw); + return -EINVAL; + } + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + cd->host_data = host_data; + cd->bit = hw; + + irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, + handle_simple_irq); + irq_set_chip_data(virq, cd); + return 0; } +static struct irq_domain_ops octeon_irq_domain_cib_ops = { + .map = octeon_irq_cib_map, + .unmap = octeon_irq_free_cd, + .xlate = octeon_irq_cib_xlat, +}; + +/* Chain to real handler. 
*/ +static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) +{ + u64 en; + u64 raw; + u64 bits; + int i; + int irq; + struct irq_domain *cib_domain = data; + struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; + + en = cvmx_read_csr(host_data->en_reg); + raw = cvmx_read_csr(host_data->raw_reg); + + bits = en & raw; + + for (i = 0; i < host_data->max_bits; i++) { + if ((bits & 1ull << i) == 0) + continue; + irq = irq_find_mapping(cib_domain, i); + if (!irq) { + unsigned long flags; + + pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", + i, host_data->raw_reg); + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en &= ~(1ull << i); + cvmx_write_csr(host_data->en_reg, en); + cvmx_write_csr(host_data->raw_reg, 1ull << i); + raw_spin_unlock_irqrestore(&host_data->lock, flags); + } else { + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *irq_data = irq_desc_get_irq_data(desc); + /* If edge, acknowledge the bit we will be sending. */ + if (irqd_get_trigger_type(irq_data) & + IRQ_TYPE_EDGE_BOTH) + cvmx_write_csr(host_data->raw_reg, 1ull << i); + generic_handle_irq_desc(irq, desc); + } + } + + return IRQ_HANDLED; +} + +static int __init octeon_irq_init_cib(struct device_node *ciu_node, + struct device_node *parent) +{ + const __be32 *addr; + u32 val; + struct octeon_irq_cib_host_data *host_data; + int parent_irq; + int r; + struct irq_domain *cib_domain; + + parent_irq = irq_of_parse_and_map(ciu_node, 0); + if (!parent_irq) { + pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", + ciu_node->name); + return -EINVAL; + } + + host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); + raw_spin_lock_init(&host_data->lock); + + addr = of_get_address(ciu_node, 0, NULL, NULL); + if (!addr) { + pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); + return -EINVAL; + } + host_data->raw_reg = (u64)phys_to_virt( + of_translate_address(ciu_node, addr)); + + addr = of_get_address(ciu_node, 1, NULL, NULL); + if (!addr) { + pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); + return -EINVAL; + } + host_data->en_reg = (u64)phys_to_virt( + of_translate_address(ciu_node, addr)); + + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); + if (r) { + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", + ciu_node->name); + return r; + } + host_data->max_bits = val; + + cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, + &octeon_irq_domain_cib_ops, + host_data); + if (!cib_domain) { + pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); + return -ENOMEM; + } + + cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ + cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ + + r = request_irq(parent_irq, octeon_irq_cib_handler, + IRQF_NO_THREAD, "cib", cib_domain); + if (r) { + pr_err("request_irq cib failed %d\n", r); + return r; + } + pr_info("CIB interrupt controller probed: %llx %d\n", + host_data->raw_reg, host_data->max_bits); + return 0; +} + +static struct of_device_id ciu_types[] __initdata = { + {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, + {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, + {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, + {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, + {} +}; + void __init arch_init_irq(void) { #ifdef CONFIG_SMP @@ -1750,10 +2305,7 @@ void __init arch_init_irq(void) cpumask_clear(irq_default_affinity); 
cpumask_set_cpu(smp_processor_id(), irq_default_affinity); #endif - if (OCTEON_IS_MODEL(OCTEON_CN68XX)) - octeon_irq_init_ciu2(); - else - octeon_irq_init_ciu(); + of_irq_init(ciu_types); } asmlinkage void plat_irq_dispatch(void) @@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void) cop0_cause &= cop0_status; cop0_cause &= ST0_IM; - if (unlikely(cop0_cause & STATUSF_IP2)) + if (cop0_cause & STATUSF_IP2) octeon_irq_ip2(); - else if (unlikely(cop0_cause & STATUSF_IP3)) + else if (cop0_cause & STATUSF_IP3) octeon_irq_ip3(); - else if (unlikely(cop0_cause & STATUSF_IP4)) + else if (cop0_cause & STATUSF_IP4) octeon_irq_ip4(); - else if (likely(cop0_cause)) + else if (cop0_cause) do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); else break; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 94f888d3384e..a42110e7edbc 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -41,6 +41,7 @@ #include <asm/octeon/octeon.h> #include <asm/octeon/pci-octeon.h> #include <asm/octeon/cvmx-mio-defs.h> +#include <asm/octeon/cvmx-rst-defs.h> extern struct plat_smp_ops octeon_smp_ops; @@ -579,12 +580,10 @@ void octeon_user_io_init(void) /* R/W If set, CVMSEG is available for loads/stores in user * mode. */ cvmmemctl.s.cvmsegenau = 0; - /* R/W Size of local memory in cache blocks, 54 (6912 bytes) - * is max legal value. */ - cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; write_c0_cvmmemctl(cvmmemctl.u64); + /* Setup of CVMSEG is done in kernel-entry-init.h */ if (smp_processor_id() == 0) pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, @@ -615,6 +614,7 @@ void __init prom_init(void) const char *arg; char *p; int i; + u64 t; int argc; #ifdef CONFIG_CAVIUM_RESERVE32 int64_t addr = -1; @@ -654,15 +654,56 @@ void __init prom_init(void) sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + if (OCTEON_IS_OCTEON2()) { /* I/O clock runs at a different rate than the CPU. */ union cvmx_mio_rst_boot rst_boot; rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; + } else if (OCTEON_IS_OCTEON3()) { + /* I/O clock runs at a different rate than the CPU. */ + union cvmx_rst_boot rst_boot; + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); + octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; } else { octeon_io_clock_rate = sysinfo->cpu_clock_hz; } + t = read_c0_cvmctl(); + if ((t & (1ull << 27)) == 0) { + /* + * Setup the multiplier save/restore code if + * CvmCtl[NOMUL] clear. 
+ */ + void *save; + void *save_end; + void *restore; + void *restore_end; + int save_len; + int restore_len; + int save_max = (char *)octeon_mult_save_end - + (char *)octeon_mult_save; + int restore_max = (char *)octeon_mult_restore_end - + (char *)octeon_mult_restore; + if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { + save = octeon_mult_save3; + save_end = octeon_mult_save3_end; + restore = octeon_mult_restore3; + restore_end = octeon_mult_restore3_end; + } else { + save = octeon_mult_save2; + save_end = octeon_mult_save2_end; + restore = octeon_mult_restore2; + restore_end = octeon_mult_restore2_end; + } + save_len = (char *)save_end - (char *)save; + restore_len = (char *)restore_end - (char *)restore; + if (!WARN_ON(save_len > save_max || + restore_len > restore_max)) { + memcpy(octeon_mult_save, save, save_len); + memcpy(octeon_mult_restore, restore, restore_len); + } + } + /* * Only enable the LED controller if we're running on a CN38XX, CN58XX, * or CN56XX. The CN30XX and CN31XX don't have an LED controller. @@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar); void prom_free_prom_memory(void) { - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { /* Check for presence of Core-14449 fix. */ u32 insn; u32 *foo; @@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void) panic("No PREF instruction at Core-14449 probe point."); if (((insn >> 16) & 0x1f) != 28) - panic("Core-14449 WAR not in place (%04x).\n" - "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); + panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" + "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", + insn); } } diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig new file mode 100644 index 000000000000..4bce1f8ebe98 --- /dev/null +++ b/arch/mips/configs/malta_qemu_32r6_defconfig @@ -0,0 +1,193 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R6=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m 
+CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=4 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index f9f5307434c2..19f710117d97 100644 --- a/arch/mips/fw/arc/misc.c +++ 
b/arch/mips/fw/arc/misc.c @@ -9,6 +9,7 @@ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999 Silicon Graphics, Inc. */ +#include <linux/compiler.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/irqflags.h> @@ -19,50 +20,55 @@ #include <asm/sgialib.h> #include <asm/bootinfo.h> -VOID +VOID __noreturn ArcHalt(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(halt); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcPowerDown(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(pdown); -never: goto never; + + unreachable(); } /* XXX is this a soft reset basically? XXX */ -VOID +VOID __noreturn ArcRestart(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(restart); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcReboot(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(reboot); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcEnterInteractiveMode(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(imode); -never: goto never; + + unreachable(); } LONG diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 200efeac4181..526539cbc99f 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,4 +1,5 @@ # MIPS headers +generic-$(CONFIG_GENERIC_CSUM) += checksum.h generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 6caf8766b80f..0cae4595e985 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -19,7 +19,7 @@ #include <asm/asmmacro-64.h> #endif -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) .macro local_irq_enable reg=t0 ei irq_enable_hazard @@ -104,7 +104,8 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -160,7 +161,8 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? 
@@ -170,16 +172,16 @@ fpu_restore_16even \thread \tmp .endm -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) .macro _EXT rd, rs, p, s ext \rd, \rs, \p, \s .endm -#else /* !CONFIG_CPU_MIPSR2 */ +#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ .macro _EXT rd, rs, p, s srl \rd, \rs, \p andi \rd, \rd, (1 << \s) - 1 .endm -#endif /* !CONFIG_CPU_MIPSR2 */ +#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ /* * Temporary until all gas have MT ASE support @@ -304,7 +306,7 @@ .set push .set noat SET_HARDFLOAT - add $1, \base, \off + addu $1, \base, \off .word LDD_MSA_INSN | (\wd << 6) .set pop .endm @@ -313,7 +315,7 @@ .set push .set noat SET_HARDFLOAT - add $1, \base, \off + addu $1, \base, \off .word STD_MSA_INSN | (\wd << 6) .set pop .endm diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 857da84cfc92..26d436336f2e 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ " sc %0, %1 \n" \ " beqzl %0, 1b \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ int temp; \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " ll %0, %1 # atomic_" #op "\n" \ " " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ } else { \ @@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ " " #asm_op " %0, %1, %3 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ int temp; \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " ll %1, %2 # atomic_" #op "_return \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!result)); \ \ @@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) - : "Ir" (i), GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { int temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: ll %1, %2 # atomic_sub_if_positive\n" " subu %0, %1, %3 \n" " bltz %0, 1f \n" @@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; @@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ " scd %0, %1 \n" \ " beqzl %0, 1b \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ long temp; \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " lld %0, %1 # atomic64_" #op "\n" \ " " #asm_op " %0, 
%2 \n" \ " scd %0, %1 \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ } else { \ @@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ " " #asm_op " %0, %1, %3 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ long temp; \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " lld %1, %2 # atomic64_" #op "_return\n" \ " " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "=" GCC_OFF12_ASM() (v->counter) \ - : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ + "=" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ : "memory"); \ } while (unlikely(!result)); \ \ @@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "=" GCC_OFF12_ASM() (v->counter) - : "Ir" (i), GCC_OFF12_ASM() (v->counter) + "=" GCC_OFF_SMALL_ASM() (v->counter) + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: lld %1, %2 # atomic64_sub_if_positive\n" " dsubu %0, %1, %3 \n" " bltz %0, 1f \n" @@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 6663bcca9d0c..9f935f6aa996 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) - : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); -#ifdef CONFIG_CPU_MIPSR2 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) + : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { do { __asm__ __volatile__( " " __LL "%0, %1 # set_bit \n" " " __INS "%0, %3, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit), "r" (~0)); } while (unlikely(!temp)); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # set_bit \n" " or %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && 
__builtin_constant_p(bit)) { do { __asm__ __volatile__( " " __LL "%0, %1 # clear_bit \n" " " __INS "%0, $0, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit)); } while (unlikely(!temp)); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # clear_bit \n" " and %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); } else @@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } else if (kernel_uses_llsc) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); @@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # change_bit \n" " xor %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); unsigned long temp; @@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr, " " __EXT "%2, %0, %3, 1 \n" " " __INS "%0, $0, %3, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "ir" (bit) : "memory"); } while (unlikely(!temp)); @@ -380,13 +380,13 @@ static inline int 
test_and_clear_bit(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_clear_bit \n" " or %2, %0, %3 \n" " xor %2, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_change_bit \n" " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word) __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__( " .set push \n" - " .set mips32 \n" + " .set "MIPS_ISA_LEVEL" \n" " clz %0, %1 \n" " .set pop \n" : "=r" (num) @@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word) __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { __asm__( " .set push \n" - " .set mips64 \n" + " .set "MIPS_ISA_LEVEL" \n" " dclz %0, %1 \n" " .set pop \n" : "=r" (num) @@ -562,7 +562,7 @@ static inline int fls(int x) if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__( " .set push \n" - " .set mips32 \n" + " .set "MIPS_ISA_LEVEL" \n" " clz %0, %1 \n" " .set pop \n" : "=r" (x) diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 3418c51e1151..5c585c5c1c3e 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -12,6 +12,10 @@ #ifndef _ASM_CHECKSUM_H #define _ASM_CHECKSUM_H +#ifdef CONFIG_GENERIC_CSUM +#include <asm-generic/checksum.h> +#else + #include <linux/in6.h> #include <asm/uaccess.h> @@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); +#define csum_partial_copy_nocheck csum_partial_copy_nocheck /* * Fold a partial checksum without adding pseudo headers */ -static inline __sum16 csum_fold(__wsum sum) +static inline __sum16 csum_fold(__wsum csum) { - __asm__( - " .set push # csum_fold\n" - " .set noat \n" - " sll $1, %0, 16 \n" - " addu %0, $1 \n" - " sltu $1, %0, $1 \n" - " srl %0, %0, 16 \n" - " addu %0, $1 \n" - " xori %0, 0xffff \n" - " .set pop" - : "=r" (sum) - : "0" (sum)); + u32 sum = (__force u32)csum;; - return (__force __sum16)sum; + sum += (sum << 16); + csum = (sum < csum); + sum >>= 16; + sum += csum; + + return (__force __sum16)~sum; } +#define csum_fold csum_fold /* * This is a version of ip_compute_csum() optimized for IP headers, @@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) return csum_fold(csum); } +#define ip_fast_csum ip_fast_csum static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, @@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, return sum; } - -/* - 
* computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); -} +#define csum_tcpudp_nofold csum_tcpudp_nofold /* * this routine is used for miscellaneous IP-like checksums, mainly @@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold(sum); } +#include <asm-generic/checksum.h> +#endif /* CONFIG_GENERIC_CSUM */ + #endif /* _ASM_CHECKSUM_H */ diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 28b1edf19501..d0a2a68ca600 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " sc %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " ll %0, %3 # xchg_u32 \n" " .set mips0 \n" " move %2, %z4 \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " sc %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " scd %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " lld %0, %3 # xchg_u64 \n" " move %2, %z4 \n" " scd %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz " beqzl $1, 1b \n" \ "2: \n" \ " .set pop \n" \ - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else if (kernel_uses_llsc) { \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: " ld " %0, %2 # __cmpxchg_asm \n" \ " bne %0, %z3, 2f \n" \ " .set mips0 \n" \ " move $1, %z4 \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ " " st " $1, %1 \n" \ " beqz $1, 1b \n" \ " .set pop \n" \ "2: \n" \ - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else { \ unsigned long 
__flags; \ diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index c73815e0123a..e081a265f422 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -16,12 +16,30 @@ #define GCC_REG_ACCUM "accum" #endif +#ifdef CONFIG_CPU_MIPSR6 +/* All MIPS R6 toolchains support the ZC constraint */ +#define GCC_OFF_SMALL_ASM() "ZC" +#else #ifndef CONFIG_CPU_MICROMIPS -#define GCC_OFF12_ASM() "R" +#define GCC_OFF_SMALL_ASM() "R" #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) -#define GCC_OFF12_ASM() "ZC" +#define GCC_OFF_SMALL_ASM() "ZC" #else #error "microMIPS compilation unsupported with GCC older than 4.9" -#endif +#endif /* CONFIG_CPU_MICROMIPS */ +#endif /* CONFIG_CPU_MIPSR6 */ + +#ifdef CONFIG_CPU_MIPSR6 +#define MIPS_ISA_LEVEL "mips64r6" +#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL +#define MIPS_ISA_LEVEL_RAW mips64r6 +#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW +#else +/* MIPS64 is a superset of MIPS32 */ +#define MIPS_ISA_LEVEL "mips64r2" +#define MIPS_ISA_ARCH_LEVEL "arch=r4000" +#define MIPS_ISA_LEVEL_RAW mips64r2 +#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW +#endif /* CONFIG_CPU_MIPSR6 */ #endif /* _ASM_COMPILER_H */ diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 2897cfafcaf0..0d8208de9a3f 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -38,6 +38,9 @@ #ifndef cpu_has_maar #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) #endif +#ifndef cpu_has_rw_llb +#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB) +#endif /* * For the moment we don't consider R6000 and R8000 so we can assume that @@ -171,6 +174,9 @@ #endif #endif +#ifndef cpu_has_mips_1 +# define cpu_has_mips_1 (!cpu_has_mips_r6) +#endif #ifndef cpu_has_mips_2 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) #endif @@ -189,12 +195,18 @@ #ifndef cpu_has_mips32r2 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) #endif +#ifndef cpu_has_mips32r6 +# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) +#endif #ifndef cpu_has_mips64r1 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) #endif #ifndef cpu_has_mips64r2 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) #endif +#ifndef cpu_has_mips64r6 +# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) +#endif /* * Shortcuts ... 
@@ -208,17 +220,23 @@ #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) -#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) +#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \ + cpu_has_mips_r6) -#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) -#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) +#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) +#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) +#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ - cpu_has_mips64r1 | cpu_has_mips64r2) + cpu_has_mips32r6 | cpu_has_mips64r1 | \ + cpu_has_mips64r2 | cpu_has_mips64r6) + +/* MIPSR2 and MIPSR6 have a lot of similarities */ +#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6) #ifndef cpu_has_mips_r2_exec_hazard -#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 +#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6) #endif /* diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a6c9ccb33c5c..c3f4f2d2e108 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -84,6 +84,11 @@ struct cpuinfo_mips { * (shifted by _CACHE_SHIFT) */ unsigned int writecombine; + /* + * Simple counter to prevent enabling HTW in nested + * htw_start/htw_stop calls + */ + unsigned int htw_seq; } __attribute__((aligned(SMP_CACHE_BYTES))); extern struct cpuinfo_mips cpu_data[]; diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index b4e2bd87df50..8245875f8b33 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_M5150: #endif +#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) + case CPU_QEMU_GENERIC: +#endif + #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 case CPU_5KC: case CPU_5KE: diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 33866fce4d63..15687234d70a 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -93,6 +93,7 @@ * These are the PRID's for when 23:16 == PRID_COMP_MIPS */ +#define PRID_IMP_QEMU_GENERIC 0x0000 #define PRID_IMP_4KC 0x8000 #define PRID_IMP_5KC 0x8100 #define PRID_IMP_20KC 0x8200 @@ -312,6 +313,8 @@ enum cpu_type_enum { CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, + CPU_QEMU_GENERIC, + CPU_LAST }; @@ -329,11 +332,14 @@ enum cpu_type_enum { #define MIPS_CPU_ISA_M32R2 0x00000020 #define MIPS_CPU_ISA_M64R1 0x00000040 #define MIPS_CPU_ISA_M64R2 0x00000080 +#define MIPS_CPU_ISA_M32R6 0x00000100 +#define MIPS_CPU_ISA_M64R6 0x00000200 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ - MIPS_CPU_ISA_M32R2) + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ - MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) + MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ + MIPS_CPU_ISA_M64R6) /* * CPU Option encodings @@ -370,6 +376,7 @@ enum cpu_type_enum { #define 
MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ +#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h index ae6fedcb0060..94105d3f58f4 100644 --- a/arch/mips/include/asm/edac.h +++ b/arch/mips/include/asm/edac.h @@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size) " sc %0, %1 \n" " beqz %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) - : GCC_OFF12_ASM() (*virt_addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) + : GCC_OFF_SMALL_ASM() (*virt_addr)); virt_addr++; } diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index eb4d95de619c..535f196ffe02 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm); struct arch_elf_state { int fp_abi; int interp_fp_abi; - int overall_abi; + int overall_fp_mode; }; +#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */ + #define INIT_ARCH_ELF_STATE { \ - .fp_abi = -1, \ - .interp_fp_abi = -1, \ - .overall_abi = -1, \ + .fp_abi = MIPS_ABI_FP_UNKNOWN, \ + .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \ + .overall_fp_mode = -1, \ } extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index affebb78f5d6..dd083e999b08 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode) goto fr_common; case FPU_64BIT: -#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) +#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ + || defined(CONFIG_64BIT)) /* we only have a 32-bit FPU */ return SIGFPE; #endif diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index ef9987a61d88..1de190bdfb9c 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -45,19 +45,19 @@ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ : "=r" (ret), "=&r" (oldval), \ - "=" GCC_OFF12_ASM() (*uaddr) \ - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ " .set mips0 \n" \ " " insn " \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "2: "user_sc("$1", "%2")" \n" \ " beqz $1, 1b \n" \ __WEAK_LLSC_MB \ @@ -74,8 +74,8 @@ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ : "=r" (ret), "=&r" (oldval), \ - "=" GCC_OFF12_ASM() (*uaddr) \ - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ "i" (-EFAULT) \ : "memory"); \ } else \ @@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + : "+r" (ret), "=&r" (val), "=" 
GCC_OFF_SMALL_ASM() (*uaddr) + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { @@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" " .set noat \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1: "user_ll("%1", "%3")" \n" " bne %1, %z4, 3f \n" " .set mips0 \n" " move $1, %z5 \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "2: "user_sc("$1", "%2")" \n" " beqz $1, 1b \n" __WEAK_LLSC_MB @@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h index 4be1a57cdbb0..71a986e9b694 100644 --- a/arch/mips/include/asm/gio_device.h +++ b/arch/mips/include/asm/gio_device.h @@ -25,8 +25,6 @@ struct gio_driver { int (*probe)(struct gio_device *, const struct gio_device_id *); void (*remove)(struct gio_device *); - int (*suspend)(struct gio_device *, pm_message_t); - int (*resume)(struct gio_device *); void (*shutdown)(struct gio_device *); struct device_driver driver; diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index e3ee92d4dbe7..4087b47ad1cb 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -11,6 +11,7 @@ #define _ASM_HAZARDS_H #include <linux/stringify.h> +#include <asm/compiler.h> #define ___ssnop \ sll $0, $0, 1 @@ -21,7 +22,7 @@ /* * TLB hazards */ -#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON) /* * MIPSR2 defines ehb for hazard avoidance @@ -58,7 +59,7 @@ do { \ unsigned long tmp; \ \ __asm__ __volatile__( \ - " .set mips64r2 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " dla %0, 1f \n" \ " jr.hb %0 \n" \ " .set mips0 \n" \ @@ -132,7 +133,7 @@ do { \ #define instruction_hazard() \ do { \ - if (cpu_has_mips_r2) \ + if (cpu_has_mips_r2_r6) \ __instruction_hazard(); \ } while (0) @@ -240,7 +241,7 @@ do { \ #define __disable_fpu_hazard -#elif defined(CONFIG_CPU_MIPSR2) +#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define __enable_fpu_hazard \ ___ehb diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 0fa5fdcd1f01..d60cc68fa31e 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -15,9 +15,10 @@ #include <linux/compiler.h> #include <linux/stringify.h> +#include <asm/compiler.h> #include <asm/hazards.h> -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6) static inline void arch_local_irq_disable(void) { @@ -118,7 +119,7 @@ void arch_local_irq_disable(void); unsigned long arch_local_irq_save(void); void arch_local_irq_restore(unsigned long flags); void __arch_local_irq_restore(unsigned long flags); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ static inline void arch_local_irq_enable(void) { @@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void) " .set push \n" " .set reorder \n" " .set noat \n" -#if 
defined(CONFIG_CPU_MIPSR2) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) " ei \n" #else " mfc0 $1,$12 \n" diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index 46dfc3c1fd49..8feaed62a2ab 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -5,6 +5,7 @@ #include <linux/bitops.h> #include <linux/atomic.h> #include <asm/cmpxchg.h> +#include <asm/compiler.h> #include <asm/war.h> typedef struct @@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_add_return \n" " addu %0, %1, %3 \n" __SC "%0, %2 \n" @@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_sub_return \n" " subu %0, %1, %3 \n" __SC "%0, %2 \n" diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 1668ee57acb9..cf92fe733995 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -8,11 +8,10 @@ #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H - -#define CP0_CYCLE_COUNTER $9, 6 #define CP0_CVMCTL_REG $9, 7 #define CP0_CVMMEMCTL_REG $11,7 #define CP0_PRID_REG $15, 0 +#define CP0_DCACHE_ERR_REG $27, 1 #define CP0_PRID_OCTEON_PASS1 0x000d0000 #define CP0_PRID_OCTEON_CN30XX 0x000d0200 @@ -38,36 +37,55 @@ # Needed for octeon specific memcpy or v0, v0, 0x5001 xor v0, v0, 0x1001 - # Read the processor ID register - mfc0 v1, CP0_PRID_REG - # Disable instruction prefetching (Octeon Pass1 errata) - or v0, v0, 0x2000 - # Skip reenable of prefetching for Octeon Pass1 - beq v1, CP0_PRID_OCTEON_PASS1, skip - nop - # Reenable instruction prefetching, not on Pass1 - xor v0, v0, 0x2000 - # Strip off pass number off of processor id - srl v1, 8 - sll v1, 8 - # CN30XX needs some extra stuff turned off for better performance - bne v1, CP0_PRID_OCTEON_CN30XX, skip - nop - # CN30XX Use random Icache replacement - or v0, v0, 0x400 - # CN30XX Disable instruction prefetching - or v0, v0, 0x2000 -skip: # First clear off CvmCtl[IPPCI] bit and move the performance # counters interrupt to IRQ 6 - li v1, ~(7 << 7) + dli v1, ~(7 << 7) and v0, v0, v1 ori v0, v0, (6 << 7) + + mfc0 v1, CP0_PRID_REG + and t1, v1, 0xfff8 + xor t1, t1, 0x9000 # 63-P1 + beqz t1, 4f + and t1, v1, 0xfff8 + xor t1, t1, 0x9008 # 63-P2 + beqz t1, 4f + and t1, v1, 0xfff8 + xor t1, t1, 0x9100 # 68-P1 + beqz t1, 4f + and t1, v1, 0xff00 + xor t1, t1, 0x9200 # 66-PX + bnez t1, 5f # Skip WAR for others. + and t1, v1, 0x00ff + slti t1, t1, 2 # 66-P1.2 and later good. + beqz t1, 5f + +4: # core-16057 work around + or v0, v0, 0x2000 # Set IPREF bit. + +5: # No core-16057 work around # Write the cavium control register dmtc0 v0, CP0_CVMCTL_REG sync # Flush dcache after config change cache 9, 0($0) + # Zero all of CVMSEG to make sure parity is correct + dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE + dsll v0, 7 + beqz v0, 2f +1: dsubu v0, 8 + sd $0, -32768(v0) + bnez v0, 1b +2: + mfc0 v0, CP0_PRID_REG + bbit0 v0, 15, 1f + # OCTEON II or better have bit 15 set. Clear the error bits. 
+ and t1, v0, 0xff00 + dli v0, 0x9500 + bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0 + dli v0, 0x27 + dmtc0 v0, CP0_DCACHE_ERR_REG +1: # Get my core id rdhwr v0, $0 # Jump the master to kernel_entry diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h index eb72b35cf04b..35c80be92207 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/war.h +++ b/arch/mips/include/asm/mach-cavium-octeon/war.h @@ -22,4 +22,7 @@ #define R10000_LLSC_WAR 0 #define MIPS34K_MISSED_ITLB_WAR 0 +#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ + OCTEON_IS_MODEL(OCTEON_CN6XXX) + #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h index 986982db7c38..79cff26d8b36 100644 --- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h +++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h @@ -27,8 +27,6 @@ struct jz_nand_platform_data { struct nand_ecclayout *ecc_layout; - unsigned int busy_gpio; - unsigned char banks[JZ_NAND_NUM_BANKS]; void (*ident_callback)(struct platform_device *, struct nand_chip *, diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h index 2e54b4bff5cf..90dbe43c8d27 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h @@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (~mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " .set arch=r4000 \n" \ "1: ll %0, %1 #custom_read_reg32 \n" \ " .set pop \n" \ - : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ - : GCC_OFF12_ASM() (*address)) + : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ + : GCC_OFF_SMALL_ASM() (*address)) #define custom_write_reg32(address, tmp) \ __asm__ __volatile__( \ @@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " "__beqz"%0, 1b \n" \ " nop \n" \ " .set pop \n" \ - : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ - : "0" (tmp), GCC_OFF12_ASM() (*address)) + : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ + : "0" (tmp), GCC_OFF_SMALL_ASM() (*address)) #endif /* __ASM_REGOPS_H__ */ diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h 
b/arch/mips/include/asm/mips-r2-to-r6-emul.h new file mode 100644 index 000000000000..60570f2c3ba2 --- /dev/null +++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h @@ -0,0 +1,96 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Imagination Technologies Ltd. + * Author: Markos Chandras <markos.chandras@imgtec.com> + */ + +#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H +#define __ASM_MIPS_R2_TO_R6_EMUL_H + +struct mips_r2_emulator_stats { + u64 movs; + u64 hilo; + u64 muls; + u64 divs; + u64 dsps; + u64 bops; + u64 traps; + u64 fpus; + u64 loads; + u64 stores; + u64 llsc; + u64 dsemul; +}; + +struct mips_r2br_emulator_stats { + u64 jrs; + u64 bltzl; + u64 bgezl; + u64 bltzll; + u64 bgezll; + u64 bltzall; + u64 bgezall; + u64 bltzal; + u64 bgezal; + u64 beql; + u64 bnel; + u64 blezl; + u64 bgtzl; +}; + +#ifdef CONFIG_DEBUG_FS + +#define MIPS_R2_STATS(M) \ +do { \ + u32 nir; \ + int err; \ + \ + preempt_disable(); \ + __this_cpu_inc(mipsr2emustats.M); \ + err = __get_user(nir, (u32 __user *)regs->cp0_epc); \ + if (!err) { \ + if (nir == BREAK_MATH) \ + __this_cpu_inc(mipsr2bdemustats.M); \ + } \ + preempt_enable(); \ +} while (0) + +#define MIPS_R2BR_STATS(M) \ +do { \ + preempt_disable(); \ + __this_cpu_inc(mipsr2bremustats.M); \ + preempt_enable(); \ +} while (0) + +#else + +#define MIPS_R2_STATS(M) do { } while (0) +#define MIPS_R2BR_STATS(M) do { } while (0) + +#endif /* CONFIG_DEBUG_FS */ + +struct r2_decoder_table { + u32 mask; + u32 code; + int (*func)(struct pt_regs *regs, u32 inst); +}; + + +extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str); + +#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR +static int mipsr2_emulation; +static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }; +#else +/* MIPS R2 Emulator ON/OFF */ +extern int mipsr2_emulation; +extern int mipsr2_decoder(struct pt_regs *regs, u32 inst); +#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */ + +#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) + +#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5b720d8c2745..fef004434096 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -653,6 +653,7 @@ #define MIPS_CONF5_NF (_ULCAST_(1) << 0) #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) +#define MIPS_CONF5_LLB (_ULCAST_(1) << 4) #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) @@ -1127,6 +1128,8 @@ do { \ #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) +#define read_c0_lladdr() __read_ulong_c0_register($17, 0) +#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) #define read_c0_maar() __read_ulong_c0_register($17, 1) #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) #define read_c0_maari() __read_32bit_c0_register($17, 2) @@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) +__BUILD_SET_C0(pagegrain) __BUILD_SET_C0(brcm_config_0) __BUILD_SET_C0(brcm_bus_pll) __BUILD_SET_C0(brcm_reset) diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index c436138945a8..1afa1f986df8 100644 --- a/arch/mips/include/asm/mmu.h +++ 
b/arch/mips/include/asm/mmu.h @@ -1,9 +1,12 @@ #ifndef __ASM_MMU_H #define __ASM_MMU_H +#include <linux/atomic.h> + typedef struct { unsigned long asid[NR_CPUS]; void *vdso; + atomic_t fp_mode_switching; } mm_context_t; #endif /* __ASM_MMU_H */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2f82568a3ee4..45914b59824c 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -25,7 +25,6 @@ do { \ if (cpu_has_htw) { \ write_c0_pwbase(pgd); \ back_to_back_c0_hazard(); \ - htw_reset(); \ } \ } while (0) @@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) for_each_possible_cpu(i) cpu_context(i, mm) = 0; + atomic_set(&mm->context.fp_mode_switching, 0); + return 0; } @@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned long flags; local_irq_save(flags); + htw_stop(); /* Check if our ASID is of an older version and thus invalid */ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) get_new_mmu_context(next, cpu); @@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); + htw_start(); local_irq_restore(flags); } @@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) local_irq_save(flags); + htw_stop(); /* Unconditionally get a new ASID. */ get_new_mmu_context(next, cpu); @@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) /* mark mmu ownership change */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); + htw_start(); local_irq_restore(flags); } @@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) unsigned long flags; local_irq_save(flags); + htw_stop(); if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { get_new_mmu_context(mm, cpu); @@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) /* will get a new context next time */ cpu_context(cpu, mm) = 0; } + htw_start(); local_irq_restore(flags); } diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 800fe578dc99..0aaf9a01ea50 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "MIPS32_R1 " #elif defined CONFIG_CPU_MIPS32_R2 #define MODULE_PROC_FAMILY "MIPS32_R2 " +#elif defined CONFIG_CPU_MIPS32_R6 +#define MODULE_PROC_FAMILY "MIPS32_R6 " #elif defined CONFIG_CPU_MIPS64_R1 #define MODULE_PROC_FAMILY "MIPS64_R1 " #elif defined CONFIG_CPU_MIPS64_R2 #define MODULE_PROC_FAMILY "MIPS64_R2 " +#elif defined CONFIG_CPU_MIPS64_R6 +#define MODULE_PROC_FAMILY "MIPS64_R6 " #elif defined CONFIG_CPU_R3000 #define MODULE_PROC_FAMILY "R3000 " #elif defined CONFIG_CPU_TX39XX diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h index 75739c83f07e..8d05d9069823 100644 --- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h +++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h @@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, " lbu %[ticket], %[now_serving]\n" "4:\n" ".set pop\n" : - [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), + [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), [now_serving] 
"=m"(qptr->now_serving), [ticket] "=r"(tmp), [my_ticket] "=r"(my_ticket) ); diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h new file mode 100644 index 000000000000..0c9c3e74d4ae --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h @@ -0,0 +1,306 @@ +/***********************license start*************** + * Author: Cavium Inc. + * + * Contact: support@cavium.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2014 Cavium Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Inc. for more information + ***********************license end**************************************/ + +#ifndef __CVMX_RST_DEFS_H__ +#define __CVMX_RST_DEFS_H__ + +#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull)) +#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull)) +#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull)) +#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8) +#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull)) +#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull)) +#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull)) +#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull)) +#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull)) +#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull)) +#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8) +#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull)) + +union cvmx_rst_boot { + uint64_t u64; + struct cvmx_rst_boot_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t chipkill:1; + uint64_t jtcsrdis:1; + uint64_t ejtagdis:1; + uint64_t romen:1; + uint64_t ckill_ppdis:1; + uint64_t jt_tstmode:1; + uint64_t vrm_err:1; + uint64_t reserved_37_56:20; + uint64_t c_mul:7; + uint64_t pnr_mul:6; + uint64_t reserved_21_23:3; + uint64_t lboot_oci:3; + uint64_t lboot_ext:6; + uint64_t lboot:10; + uint64_t rboot:1; + uint64_t rboot_pin:1; +#else + uint64_t rboot_pin:1; + uint64_t rboot:1; + uint64_t lboot:10; + uint64_t lboot_ext:6; + uint64_t lboot_oci:3; + uint64_t reserved_21_23:3; + uint64_t pnr_mul:6; + uint64_t c_mul:7; + uint64_t reserved_37_56:20; + uint64_t vrm_err:1; + uint64_t jt_tstmode:1; + uint64_t ckill_ppdis:1; + uint64_t romen:1; + uint64_t ejtagdis:1; + uint64_t jtcsrdis:1; + uint64_t chipkill:1; +#endif + } s; + struct cvmx_rst_boot_s cn70xx; + struct cvmx_rst_boot_s cn70xxp1; + struct cvmx_rst_boot_s cn78xx; +}; + +union cvmx_rst_cfg { + uint64_t u64; + struct cvmx_rst_cfg_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t bist_delay:58; + uint64_t reserved_3_5:3; + uint64_t cntl_clr_bist:1; + uint64_t warm_clr_bist:1; + uint64_t 
soft_clr_bist:1; +#else + uint64_t soft_clr_bist:1; + uint64_t warm_clr_bist:1; + uint64_t cntl_clr_bist:1; + uint64_t reserved_3_5:3; + uint64_t bist_delay:58; +#endif + } s; + struct cvmx_rst_cfg_s cn70xx; + struct cvmx_rst_cfg_s cn70xxp1; + struct cvmx_rst_cfg_s cn78xx; +}; + +union cvmx_rst_ckill { + uint64_t u64; + struct cvmx_rst_ckill_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_47_63:17; + uint64_t timer:47; +#else + uint64_t timer:47; + uint64_t reserved_47_63:17; +#endif + } s; + struct cvmx_rst_ckill_s cn70xx; + struct cvmx_rst_ckill_s cn70xxp1; + struct cvmx_rst_ckill_s cn78xx; +}; + +union cvmx_rst_ctlx { + uint64_t u64; + struct cvmx_rst_ctlx_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_10_63:54; + uint64_t prst_link:1; + uint64_t rst_done:1; + uint64_t rst_link:1; + uint64_t host_mode:1; + uint64_t reserved_4_5:2; + uint64_t rst_drv:1; + uint64_t rst_rcv:1; + uint64_t rst_chip:1; + uint64_t rst_val:1; +#else + uint64_t rst_val:1; + uint64_t rst_chip:1; + uint64_t rst_rcv:1; + uint64_t rst_drv:1; + uint64_t reserved_4_5:2; + uint64_t host_mode:1; + uint64_t rst_link:1; + uint64_t rst_done:1; + uint64_t prst_link:1; + uint64_t reserved_10_63:54; +#endif + } s; + struct cvmx_rst_ctlx_s cn70xx; + struct cvmx_rst_ctlx_s cn70xxp1; + struct cvmx_rst_ctlx_s cn78xx; +}; + +union cvmx_rst_delay { + uint64_t u64; + struct cvmx_rst_delay_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_32_63:32; + uint64_t warm_rst_dly:16; + uint64_t soft_rst_dly:16; +#else + uint64_t soft_rst_dly:16; + uint64_t warm_rst_dly:16; + uint64_t reserved_32_63:32; +#endif + } s; + struct cvmx_rst_delay_s cn70xx; + struct cvmx_rst_delay_s cn70xxp1; + struct cvmx_rst_delay_s cn78xx; +}; + +union cvmx_rst_eco { + uint64_t u64; + struct cvmx_rst_eco_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_32_63:32; + uint64_t eco_rw:32; +#else + uint64_t eco_rw:32; + uint64_t reserved_32_63:32; +#endif + } s; + struct cvmx_rst_eco_s cn78xx; +}; + +union cvmx_rst_int { + uint64_t u64; + struct cvmx_rst_int_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_12_63:52; + uint64_t perst:4; + uint64_t reserved_4_7:4; + uint64_t rst_link:4; +#else + uint64_t rst_link:4; + uint64_t reserved_4_7:4; + uint64_t perst:4; + uint64_t reserved_12_63:52; +#endif + } s; + struct cvmx_rst_int_cn70xx { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_11_63:53; + uint64_t perst:3; + uint64_t reserved_3_7:5; + uint64_t rst_link:3; +#else + uint64_t rst_link:3; + uint64_t reserved_3_7:5; + uint64_t perst:3; + uint64_t reserved_11_63:53; +#endif + } cn70xx; + struct cvmx_rst_int_cn70xx cn70xxp1; + struct cvmx_rst_int_s cn78xx; +}; + +union cvmx_rst_ocx { + uint64_t u64; + struct cvmx_rst_ocx_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_3_63:61; + uint64_t rst_link:3; +#else + uint64_t rst_link:3; + uint64_t reserved_3_63:61; +#endif + } s; + struct cvmx_rst_ocx_s cn78xx; +}; + +union cvmx_rst_power_dbg { + uint64_t u64; + struct cvmx_rst_power_dbg_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_3_63:61; + uint64_t str:3; +#else + uint64_t str:3; + uint64_t reserved_3_63:61; +#endif + } s; + struct cvmx_rst_power_dbg_s cn78xx; +}; + +union cvmx_rst_pp_power { + uint64_t u64; + struct cvmx_rst_pp_power_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_48_63:16; + uint64_t gate:48; +#else + uint64_t gate:48; + uint64_t reserved_48_63:16; +#endif + } s; + struct cvmx_rst_pp_power_cn70xx { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_4_63:60; + uint64_t gate:4; +#else + uint64_t gate:4; + 
uint64_t reserved_4_63:60; +#endif + } cn70xx; + struct cvmx_rst_pp_power_cn70xx cn70xxp1; + struct cvmx_rst_pp_power_s cn78xx; +}; + +union cvmx_rst_soft_prstx { + uint64_t u64; + struct cvmx_rst_soft_prstx_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_1_63:63; + uint64_t soft_prst:1; +#else + uint64_t soft_prst:1; + uint64_t reserved_1_63:63; +#endif + } s; + struct cvmx_rst_soft_prstx_s cn70xx; + struct cvmx_rst_soft_prstx_s cn70xxp1; + struct cvmx_rst_soft_prstx_s cn78xx; +}; + +union cvmx_rst_soft_rst { + uint64_t u64; + struct cvmx_rst_soft_rst_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_1_63:63; + uint64_t soft_rst:1; +#else + uint64_t soft_rst:1; + uint64_t reserved_1_63:63; +#endif + } s; + struct cvmx_rst_soft_rst_s cn70xx; + struct cvmx_rst_soft_rst_s cn70xxp1; + struct cvmx_rst_soft_rst_s cn78xx; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h index e8a1c2fd52cd..92b377e36dac 100644 --- a/arch/mips/include/asm/octeon/octeon-model.h +++ b/arch/mips/include/asm/octeon/octeon-model.h @@ -45,6 +45,7 @@ */ #define OCTEON_FAMILY_MASK 0x00ffff00 +#define OCTEON_PRID_MASK 0x00ffffff /* Flag bits in top byte */ /* Ignores revision in model checks */ @@ -63,11 +64,52 @@ #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 /* Match all cnf7XXX Octeon models. */ #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 +/* Match all cn7XXX Octeon models. */ +#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000 +#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \ + OM_MATCH_6XXX_FAMILY_MODELS | \ + OM_MATCH_F7XXX_FAMILY_MODELS | \ + OM_MATCH_7XXX_FAMILY_MODELS) +/* + * CN7XXX models with new revision encoding + */ + +#define OCTEON_CN73XX_PASS1_0 0x000d9700 +#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN70XX_PASS1_0 0x000d9600 +#define OCTEON_CN70XX_PASS1_1 0x000d9601 +#define OCTEON_CN70XX_PASS1_2 0x000d9602 + +#define OCTEON_CN70XX_PASS2_0 0x000d9608 + +#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) +#define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN71XX OCTEON_CN70XX + +#define OCTEON_CN78XX_PASS1_0 0x000d9500 +#define OCTEON_CN78XX_PASS1_1 0x000d9501 +#define OCTEON_CN78XX_PASS2_0 0x000d9508 + +#define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) +#define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL) /* * CNF7XXX models with new revision encoding */ #define OCTEON_CNF71XX_PASS1_0 0x000d9400 +#define OCTEON_CNF71XX_PASS1_1 0x000d9401 #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) @@ -79,6 +121,8 @@ #define OCTEON_CN68XX_PASS1_1 0x000d9101 #define OCTEON_CN68XX_PASS1_2 0x000d9102 #define OCTEON_CN68XX_PASS2_0 0x000d9108 +#define OCTEON_CN68XX_PASS2_1 0x000d9109 +#define OCTEON_CN68XX_PASS2_2 0x000d910a #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) @@ -104,11 +148,18 @@ #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | 
OM_IGNORE_MINOR_REVISION) #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) +/* CN62XX is same as CN63XX with 1 MB cache */ +#define OCTEON_CN62XX OCTEON_CN63XX + #define OCTEON_CN61XX_PASS1_0 0x000d9300 +#define OCTEON_CN61XX_PASS1_1 0x000d9301 #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) +/* CN60XX is same as CN61XX with 512 KB cache */ +#define OCTEON_CN60XX OCTEON_CN61XX + /* * CN5XXX models with new revision encoding */ @@ -120,7 +171,7 @@ #define OCTEON_CN58XX_PASS2_2 0x000d030a #define OCTEON_CN58XX_PASS2_3 0x000d030b -#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION) #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X @@ -217,12 +268,10 @@ #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) - -/* These are used to cover entire families of OCTEON processors */ -#define OCTEON_FAM_1 (OCTEON_CN3XXX) -#define OCTEON_FAM_PLUS (OCTEON_CN5XXX) -#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS) -#define OCTEON_FAM_2 (OCTEON_CN6XXX) +#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \ + OM_MATCH_F7XXX_FAMILY_MODELS) +#define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \ + OM_MATCH_7XXX_FAMILY_MODELS) /* The revision byte (low byte) has two different encodings. * CN3XXX: @@ -232,7 +281,7 @@ * <4>: alternate package * <3:0>: revision * - * CN5XXX: + * CN5XXX and older models: * * bits * <7>: reserved (0) @@ -251,17 +300,21 @@ /* CN5XXX and later use different layout of bits in the revision ID field */ #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f -#define OCTEON_58XX_MODEL_MASK 0x00ffffc0 +#define OCTEON_58XX_MODEL_MASK 0x00ffff40 #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) -#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8) +#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38) #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 -/* forward declarations */ static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); static inline uint64_t cvmx_read_csr(uint64_t csr_addr); #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) +/* + * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) + * returns true if chip_model is identical or belong to the OCTEON + * model group specified in arg_model. + */ /* NOTE: This for internal use only! 
*/ #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ @@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr); ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ - && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \ + && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \ ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ - && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \ ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ - && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \ + ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \ + ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ ))) @@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model) { uint32_t cpuid = cvmx_get_proc_id(); - /* - * Check for special case of mismarked 3005 samples. We only - * need to check if the sub model isn't being ignored - */ - if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) { - if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34))) - cpuid |= 0x10; - } return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); } @@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model) #define OCTEON_IS_COMMON_BINARY() 1 #undef OCTEON_MODEL +#define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX) +#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX) +#define OCTEON_IS_OCTEON2() \ + (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) + +#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX) + +#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS()) + const char *__init octeon_model_get_string(uint32_t chip_id); /* * Return the octeon family, i.e., ProcessorID of the PrID register. + * + * @return the octeon family on success, ((unint32_t)-1) on error. 
*/ static inline uint32_t cvmx_get_octeon_family(void) { diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 6dfefd2d5cdf..041596570856 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -9,6 +9,7 @@ #define __ASM_OCTEON_OCTEON_H #include <asm/octeon/cvmx.h> +#include <asm/bitfield.h> extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, uint64_t alignment, @@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long); #define OCTOEN_SERIAL_LEN 20 struct octeon_boot_descriptor { +#ifdef __BIG_ENDIAN_BITFIELD /* Start of block referenced by assembly code - do not change! */ uint32_t desc_version; uint32_t desc_size; @@ -104,77 +106,149 @@ struct octeon_boot_descriptor { uint8_t mac_addr_base[6]; uint8_t mac_addr_count; uint64_t cvmx_desc_vaddr; +#else + uint32_t desc_size; + uint32_t desc_version; + uint64_t stack_top; + uint64_t heap_base; + uint64_t heap_end; + /* Only used by bootloader */ + uint64_t entry_point; + uint64_t desc_vaddr; + /* End of This block referenced by assembly code - do not change! */ + uint32_t stack_size; + uint32_t exception_base_addr; + uint32_t argc; + uint32_t heap_size; + /* + * Argc count for application. + * Warning low bit scrambled in little-endian. + */ + uint32_t argv[OCTEON_ARGV_MAX_ARGS]; + +#define BOOT_FLAG_INIT_CORE (1 << 0) +#define OCTEON_BL_FLAG_DEBUG (1 << 1) +#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2) + /* If set, use uart1 for console */ +#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3) + /* If set, use PCI console */ +#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4) + /* Call exit on break on serial port */ +#define OCTEON_BL_FLAG_BREAK (1 << 5) + + uint32_t core_mask; + uint32_t flags; + /* physical address of free memory descriptor block. */ + uint32_t phy_mem_desc_addr; + /* DRAM size in megabyes. */ + uint32_t dram_size; + /* CPU clock speed, in hz. */ + uint32_t eclock_hz; + /* used to pass flags from app to debugger. */ + uint32_t debugger_flags_base_addr; + /* SPI4 clock in hz. */ + uint32_t spi_clock_hz; + /* DRAM clock speed, in hz. */ + uint32_t dclock_hz; + uint8_t chip_rev_minor; + uint8_t chip_rev_major; + uint16_t chip_type; + uint8_t board_rev_minor; + uint8_t board_rev_major; + uint16_t board_type; + + uint64_t unused1[4]; /* Not even filled in by bootloader. */ + + uint64_t cvmx_desc_vaddr; +#endif }; union octeon_cvmemctl { uint64_t u64; struct { /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t tlbbist:1; + __BITFIELD_FIELD(uint64_t tlbbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t l1cbist:1; + __BITFIELD_FIELD(uint64_t l1cbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t l1dbist:1; + __BITFIELD_FIELD(uint64_t l1dbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t dcmbist:1; + __BITFIELD_FIELD(uint64_t dcmbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t ptgbist:1; + __BITFIELD_FIELD(uint64_t ptgbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t wbfbist:1; + __BITFIELD_FIELD(uint64_t wbfbist:1, /* Reserved */ - uint64_t reserved:22; + __BITFIELD_FIELD(uint64_t reserved:17, + /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU. + * This field selects between the TLB replacement policies: + * bitmask LRU or NLU. Bitmask LRU maintains a mask of + * recently used TLB entries and avoids them as new entries + * are allocated. NLU simply guarantees that the next + * allocation is not the last used TLB entry. 
*/ + __BITFIELD_FIELD(uint64_t tlbnlu:1, + /* OCTEON II - Selects the bit in the counter used for + * releasing a PAUSE. This counter trips every 2(8+PAUSETIME) + * cycles. If not already released, the cnMIPS II core will + * always release a given PAUSE instruction within + * 2(8+PAUSETIME). If the counter trip happens to line up, + * the cnMIPS II core may release the PAUSE instantly. */ + __BITFIELD_FIELD(uint64_t pausetime:3, + /* OCTEON II - This field is an extension of + * CvmMemCtl[DIDTTO] */ + __BITFIELD_FIELD(uint64_t didtto2:1, /* R/W If set, marked write-buffer entries time out * the same as as other entries; if clear, marked * write-buffer entries use the maximum timeout. */ - uint64_t dismarkwblongto:1; + __BITFIELD_FIELD(uint64_t dismarkwblongto:1, /* R/W If set, a merged store does not clear the * write-buffer entry timeout state. */ - uint64_t dismrgclrwbto:1; + __BITFIELD_FIELD(uint64_t dismrgclrwbto:1, /* R/W Two bits that are the MSBs of the resultant * CVMSEG LM word location for an IOBDMA. The other 8 * bits come from the SCRADDR field of the IOBDMA. */ - uint64_t iobdmascrmsb:2; + __BITFIELD_FIELD(uint64_t iobdmascrmsb:2, /* R/W If set, SYNCWS and SYNCS only order marked * stores; if clear, SYNCWS and SYNCS only order * unmarked stores. SYNCWSMARKED has no effect when * DISSYNCWS is set. */ - uint64_t syncwsmarked:1; + __BITFIELD_FIELD(uint64_t syncwsmarked:1, /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as * SYNC. */ - uint64_t dissyncws:1; + __BITFIELD_FIELD(uint64_t dissyncws:1, /* R/W If set, no stall happens on write buffer * full. */ - uint64_t diswbfst:1; + __BITFIELD_FIELD(uint64_t diswbfst:1, /* R/W If set (and SX set), supervisor-level * loads/stores can use XKPHYS addresses with * VA<48>==0 */ - uint64_t xkmemenas:1; + __BITFIELD_FIELD(uint64_t xkmemenas:1, /* R/W If set (and UX set), user-level loads/stores * can use XKPHYS addresses with VA<48>==0 */ - uint64_t xkmemenau:1; + __BITFIELD_FIELD(uint64_t xkmemenau:1, /* R/W If set (and SX set), supervisor-level * loads/stores can use XKPHYS addresses with * VA<48>==1 */ - uint64_t xkioenas:1; + __BITFIELD_FIELD(uint64_t xkioenas:1, /* R/W If set (and UX set), user-level loads/stores * can use XKPHYS addresses with VA<48>==1 */ - uint64_t xkioenau:1; + __BITFIELD_FIELD(uint64_t xkioenau:1, /* R/W If set, all stores act as SYNCW (NOMERGE must * be set when this is set) RW, reset to 0. */ - uint64_t allsyncw:1; + __BITFIELD_FIELD(uint64_t allsyncw:1, /* R/W If set, no stores merge, and all stores reach * the coherent bus in order. */ - uint64_t nomerge:1; + __BITFIELD_FIELD(uint64_t nomerge:1, /* R/W Selects the bit in the counter used for DID * time-outs 0 = 231, 1 = 230, 2 = 229, 3 = * 214. Actual time-out is between 1x and 2x this * interval. For example, with DIDTTO=3, expiration * interval is between 16K and 32K. */ - uint64_t didtto:2; + __BITFIELD_FIELD(uint64_t didtto:2, /* R/W If set, the (mem) CSR clock never turns off. */ - uint64_t csrckalwys:1; + __BITFIELD_FIELD(uint64_t csrckalwys:1, /* R/W If set, mclk never turns off. */ - uint64_t mclkalwys:1; + __BITFIELD_FIELD(uint64_t mclkalwys:1, /* R/W Selects the bit in the counter used for write * buffer flush time-outs (WBFLT+11) is the bit * position in an internal counter used to determine @@ -182,25 +256,26 @@ union octeon_cvmemctl { * 2x this interval. For example, with WBFLT = 0, a * write buffer expires between 2K and 4K cycles after * the write buffer entry is allocated. 
*/ - uint64_t wbfltime:3; + __BITFIELD_FIELD(uint64_t wbfltime:3, /* R/W If set, do not put Istream in the L2 cache. */ - uint64_t istrnol2:1; + __BITFIELD_FIELD(uint64_t istrnol2:1, /* R/W The write buffer threshold. */ - uint64_t wbthresh:4; + __BITFIELD_FIELD(uint64_t wbthresh:4, /* Reserved */ - uint64_t reserved2:2; + __BITFIELD_FIELD(uint64_t reserved2:2, /* R/W If set, CVMSEG is available for loads/stores in * kernel/debug mode. */ - uint64_t cvmsegenak:1; + __BITFIELD_FIELD(uint64_t cvmsegenak:1, /* R/W If set, CVMSEG is available for loads/stores in * supervisor mode. */ - uint64_t cvmsegenas:1; + __BITFIELD_FIELD(uint64_t cvmsegenas:1, /* R/W If set, CVMSEG is available for loads/stores in * user mode. */ - uint64_t cvmsegenau:1; + __BITFIELD_FIELD(uint64_t cvmsegenau:1, /* R/W Size of local memory in cache blocks, 54 (6912 * bytes) is max legal value. */ - uint64_t lmemsz:6; + __BITFIELD_FIELD(uint64_t lmemsz:6, + ;))))))))))))))))))))))))))))))))) } s; }; @@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val) cvmx_read64_uint32(address ^ 4); } +/* Octeon multiplier save/restore routines from octeon_switch.S */ +void octeon_mult_save(void); +void octeon_mult_restore(void); +void octeon_mult_save_end(void); +void octeon_mult_restore_end(void); +void octeon_mult_save3(void); +void octeon_mult_save3_end(void); +void octeon_mult_save2(void); +void octeon_mult_save2_end(void); +void octeon_mult_restore3(void); +void octeon_mult_restore3_end(void); +void octeon_mult_restore2(void); +void octeon_mult_restore2_end(void); /** * Read a 32bit value from the Octeon NPI register space diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 69529624a005..193b4c6b7541 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif +#ifdef CONFIG_PCI_DOMAINS #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index static inline int pci_proc_domain(struct pci_bus *bus) @@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) struct pci_controller *hose = bus->sysdata; return hose->need_domain_info; } +#endif /* CONFIG_PCI_DOMAINS */ #endif /* __KERNEL__ */ diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index fc807aa5ec8d..91747c282bb3 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -35,7 +35,7 @@ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) /* - * The following bits are directly used by the TLB hardware + * The following bits are implemented by the TLB hardware */ #define _PAGE_GLOBAL_SHIFT 0 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) @@ -60,43 +60,40 @@ #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#define _PAGE_SILENT_READ _PAGE_VALID -#define _PAGE_SILENT_WRITE _PAGE_DIRTY - #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) /* - * The following are implemented by software + * The following bits are implemented in software */ -#define _PAGE_PRESENT_SHIFT 0 -#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) -#define _PAGE_READ_SHIFT 1 -#define _PAGE_READ (1 << _PAGE_READ_SHIFT) -#define _PAGE_WRITE_SHIFT 2 -#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) -#define _PAGE_ACCESSED_SHIFT 3 -#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) -#define 
_PAGE_MODIFIED_SHIFT 4 -#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) +#define _PAGE_PRESENT_SHIFT (0) +#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) +#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) +#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) /* - * And these are the hardware TLB bits + * The following bits are implemented by the TLB hardware */ -#define _PAGE_GLOBAL_SHIFT 8 -#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#define _PAGE_VALID_SHIFT 9 -#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) -#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ -#define _PAGE_DIRTY_SHIFT 10 +#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) +#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) -#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) -#define _CACHE_UNCACHED_SHIFT 11 +#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1) #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) -#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) +#define _CACHE_MASK _CACHE_UNCACHED -#else /* 'Normal' r4K case */ +#define _PFN_SHIFT PAGE_SHIFT + +#else /* * When using the RI/XI bit support, we have 13 bits of flags below * the physical address. The RI/XI bits are placed such that a SRL 5 @@ -107,10 +104,8 @@ /* * The following bits are implemented in software - * - * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi. */ -#define _PAGE_PRESENT_SHIFT (0) +#define _PAGE_PRESENT_SHIFT 0 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) #define _PAGE_READ_SHIFT (cpu_has_rixi ? 
_PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) @@ -125,16 +120,11 @@ /* huge tlb page */ #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) -#else -#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) -#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ -#endif - -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT -/* huge tlb page */ #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) #else +#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) +#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ #endif @@ -149,17 +139,10 @@ #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) - #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) -/* synonym */ -#define _PAGE_SILENT_READ (_PAGE_VALID) - -/* The MIPS dirty bit */ #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) -#define _PAGE_SILENT_WRITE (_PAGE_DIRTY) - #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) #define _CACHE_MASK (7 << _CACHE_SHIFT) @@ -167,9 +150,9 @@ #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ -#ifndef _PFN_SHIFT -#define _PFN_SHIFT PAGE_SHIFT -#endif +#define _PAGE_SILENT_READ _PAGE_VALID +#define _PAGE_SILENT_WRITE _PAGE_DIRTY + #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) #ifndef _PAGE_NO_READ @@ -179,9 +162,6 @@ #ifndef _PAGE_NO_EXEC #define _PAGE_NO_EXEC ({BUG(); 0; }) #endif -#ifndef _PAGE_GLOBAL_SHIFT -#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL) -#endif #ifndef __ASSEMBLY__ @@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) #endif #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 
0 : _PAGE_READ)) -#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) +#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) -#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) +#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PFN_MASK | _CACHE_MASK) #endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 583ff4215479..bef782c4a44b 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -99,29 +99,35 @@ extern void paging_init(void); #define htw_stop() \ do { \ - if (cpu_has_htw) \ - write_c0_pwctl(read_c0_pwctl() & \ - ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ + unsigned long flags; \ + \ + if (cpu_has_htw) { \ + local_irq_save(flags); \ + if(!raw_current_cpu_data.htw_seq++) { \ + write_c0_pwctl(read_c0_pwctl() & \ + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ + local_irq_restore(flags); \ + } \ } while(0) #define htw_start() \ do { \ - if (cpu_has_htw) \ - write_c0_pwctl(read_c0_pwctl() | \ - (1 << MIPS_PWCTL_PWEN_SHIFT)); \ -} while(0) - - -#define htw_reset() \ -do { \ + unsigned long flags; \ + \ if (cpu_has_htw) { \ - htw_stop(); \ - back_to_back_c0_hazard(); \ - htw_start(); \ - back_to_back_c0_hazard(); \ + local_irq_save(flags); \ + if (!--raw_current_cpu_data.htw_seq) { \ + write_c0_pwctl(read_c0_pwctl() | \ + (1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ + local_irq_restore(flags); \ } \ } while(0) + extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); @@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt { pte_t null = __pte(0); + htw_stop(); /* Preserve global status for the pair */ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) null.pte_low = null.pte_high = _PAGE_GLOBAL; set_pte_at(mm, addr, ptep, null); - htw_reset(); + htw_start(); } #else @@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + htw_stop(); #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) @@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt else #endif set_pte_at(mm, addr, ptep, __pte(0)); - htw_reset(); + htw_start(); } #endif @@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte) return pte; } -#ifdef _PAGE_HUGE +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } static inline pte_t pte_mkhuge(pte_t pte) @@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte) pte_val(pte) |= _PAGE_HUGE; return pte; } -#endif /* _PAGE_HUGE */ +#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ #endif static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkspecial(pte_t pte) { return pte; } diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index f1df4cb4a286..b5dcbee01fd7 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count; #define TASK_SIZE 0x7fff8000UL #endif -#ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE -#endif #define TASK_IS_32BIT_ADDR 1 @@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count; #define TASK_SIZE32 0x7fff8000UL #define 
TASK_SIZE64 0x10000000000UL #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) - -#ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE64 -#endif - #define TASK_SIZE_OF(tsk) \ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) @@ -211,6 +205,8 @@ struct octeon_cop2_state { unsigned long cop2_gfm_poly; /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ unsigned long cop2_gfm_result[2]; + /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */ + unsigned long cop2_sha3[2]; }; #define COP2_INIT \ .cp2 = {0,}, @@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p); #endif +/* + * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options + * to the prctl syscall. + */ +extern int mips_get_process_fp_mode(struct task_struct *task); +extern int mips_set_process_fp_mode(struct task_struct *task, + unsigned int value); + +#define GET_FP_MODE(task) mips_get_process_fp_mode(task) +#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) + #endif /* _ASM_PROCESSOR_H */ diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index eaa26270a5e5..8ebc2aa5f3e1 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -24,13 +24,6 @@ struct boot_param_header; extern void __dt_setup_arch(void *bph); extern int __dt_register_buses(const char *bus0, const char *bus1); -#define dt_setup_arch(sym) \ -({ \ - extern char __dtb_##sym##_begin[]; \ - \ - __dt_setup_arch(__dtb_##sym##_begin); \ -}) - #else /* CONFIG_OF */ static inline void device_tree_init(void) { } #endif /* CONFIG_OF */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index fc783f843bdc..ffc320389f40 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -40,8 +40,8 @@ struct pt_regs { unsigned long cp0_cause; unsigned long cp0_epc; #ifdef CONFIG_CPU_CAVIUM_OCTEON - unsigned long long mpl[3]; /* MTM{0,1,2} */ - unsigned long long mtp[3]; /* MTP{0,1,2} */ + unsigned long long mpl[6]; /* MTM{0-5} */ + unsigned long long mtp[6]; /* MTP{0-5} */ #endif } __aligned(8); diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index e293a8d89a6d..1b22d2da88a1 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -14,6 +14,7 @@ #include <asm/asm.h> #include <asm/cacheops.h> +#include <asm/compiler.h> #include <asm/cpu-features.h> #include <asm/cpu-type.h> #include <asm/mipsmtregs.h> @@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void); __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ " cache %0, %1 \n" \ " .set pop \n" \ : \ @@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: cache %0, (%1) \n" \ "2: .set pop \n" \ " .section __ex_table,\"a\" \n" \ @@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr) cache_op(Page_Invalidate_T, addr); } +#ifndef CONFIG_CPU_MIPSR6 #define cache16_unroll32(base,op) \ __asm__ __volatile__( \ " .set push \n" \ @@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr) : "r" (base), \ "i" (op)); +#else +/* + * MIPS R6 changed the cache opcode and moved to a 8-bit offset field. 
+ * This means we now need to increment the base register before we flush + * more cache lines + */ +#define cache16_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ + " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ + " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ + " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ + " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ + " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ + " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ + " addiu $1, $0, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ + " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ + " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ + " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ + " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache32_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ + " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ + " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " addiu $1, $1, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " addiu $1, $1, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache64_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache128_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set 
mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); +#endif /* CONFIG_CPU_MIPSR6 */ + /* * Perform the cache operation specified by op using a user mode virtual * address while in kernel mode. diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h index 753275accd18..195db5045ae5 100644 --- a/arch/mips/include/asm/sgialib.h +++ b/arch/mips/include/asm/sgialib.h @@ -11,6 +11,7 @@ #ifndef _ASM_SGIALIB_H #define _ASM_SGIALIB_H +#include <linux/compiler.h> #include <asm/sgiarcs.h> extern struct linux_romvec *romvec; @@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt); extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); /* Misc. routines. */ -extern VOID ArcReboot(VOID) __attribute__((noreturn)); -extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); +extern VOID ArcHalt(VOID) __noreturn; +extern VOID ArcPowerDown(VOID) __noreturn; +extern VOID ArcRestart(VOID) __noreturn; +extern VOID ArcReboot(VOID) __noreturn; +extern VOID ArcEnterInteractiveMode(VOID) __noreturn; extern VOID ArcFlushAllCaches(VOID); extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h deleted file mode 100644 index dd9a762646fc..000000000000 --- a/arch/mips/include/asm/siginfo.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle - * Copyright (C) 2000, 2001 Silicon Graphics, Inc. - */ -#ifndef _ASM_SIGINFO_H -#define _ASM_SIGINFO_H - -#include <uapi/asm/siginfo.h> - - -/* - * Duplicated here because of <asm-generic/siginfo.h> braindamage ... 
- */ -#include <linux/string.h> - -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) -{ - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); -} - -#endif /* _ASM_SIGINFO_H */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index c6d06d383ef9..b4548690ade9 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " bltz %1, 1b \n" " addu %1, 1 \n" "2: sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw) smp_llsc_mb(); } -/* Note the use of sub, not subu which will make the kernel die with an - overflow exception if we ever try to unlock an rwlock that is already - unlocked or is being held by a writer. 
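Editorial aside, not part of the patch: a user-space model of the lock-word convention the rwlock routines below manipulate - a non-negative word counts readers, the sign bit marks a writer - written with C11 atomics rather than the ll/sc sequences the real code uses. Function names are placeholders.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool read_trylock(atomic_int *lock)
{
	int old = atomic_load(lock);

	while (old >= 0) {		/* negative means a writer holds it */
		if (atomic_compare_exchange_weak(lock, &old, old + 1))
			return true;	/* one more reader counted */
	}
	return false;
}

static void read_unlock(atomic_int *lock)
{
	atomic_fetch_sub(lock, 1);	/* what the ll / addiu -1 / sc loop does */
}

static bool write_trylock(atomic_int *lock)
{
	int expected = 0;		/* writers need no readers and no writer */

	return atomic_compare_exchange_strong(lock, &expected, INT_MIN);
}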
*/ static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( "1: ll %1, %2 # arch_read_unlock \n" - " sub %1, 1 \n" + " addiu %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { __asm__ __volatile__( "1: ll %1, %2 # arch_read_unlock \n" - " sub %1, 1 \n" + " addiu %1, -1 \n" " sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " bnez %1, 1b \n" " lui %1, 0x8000 \n" "2: sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { __asm__ __volatile__( @@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } @@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " li %2, 1 \n" " .set reorder \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " sc %1, %0 \n" " li %2, 1 \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h index 0b89006e4907..0f90d88e464d 100644 --- a/arch/mips/include/asm/spram.h +++ b/arch/mips/include/asm/spram.h @@ -1,10 +1,10 @@ #ifndef _MIPS_SPRAM_H #define _MIPS_SPRAM_H -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_MIPS_SPRAM) extern __init void spram_config(void); #else static inline void spram_config(void) { }; -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_MIPS_SPRAM */ #endif /* _MIPS_SPRAM_H */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index b188c797565c..28d6d9364bd1 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -40,7 +40,7 @@ LONG_S v1, PT_HI(sp) 
mflhxu v1 LONG_S v1, PT_ACX(sp) -#else +#elif !defined(CONFIG_CPU_MIPSR6) mfhi v1 #endif #ifdef CONFIG_32BIT @@ -50,7 +50,7 @@ LONG_S $10, PT_R10(sp) LONG_S $11, PT_R11(sp) LONG_S $12, PT_R12(sp) -#ifndef CONFIG_CPU_HAS_SMARTMIPS +#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_HI(sp) mflo v1 #endif @@ -58,7 +58,7 @@ LONG_S $14, PT_R14(sp) LONG_S $15, PT_R15(sp) LONG_S $24, PT_R24(sp) -#ifndef CONFIG_CPU_HAS_SMARTMIPS +#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_LO(sp) #endif #ifdef CONFIG_CPU_CAVIUM_OCTEON @@ -226,7 +226,7 @@ mtlhx $24 LONG_L $24, PT_LO(sp) mtlhx $24 -#else +#elif !defined(CONFIG_CPU_MIPSR6) LONG_L $24, PT_LO(sp) mtlo $24 LONG_L $24, PT_HI(sp) diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index b928b6f898cd..e92d6c4b5ed1 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -75,9 +75,12 @@ do { \ #endif #define __clear_software_ll_bit() \ -do { \ - if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ - ll_bit = 0; \ +do { if (cpu_has_rw_llb) { \ + write_c0_lladdr(0); \ + } else { \ + if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\ + ll_bit = 0; \ + } \ } while (0) #define switch_to(prev, next, last) \ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 9e1295f874f0..55ed6602204c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -28,7 +28,7 @@ struct thread_info { unsigned long tp_value; /* thread pointer */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - + int r2_emul_return; /* 1 => Returning from R2 emulator */ mm_segment_t addr_limit; /* * thread address space limit: * 0x7fffffff for user-thead diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 89c22433b1c6..fc0cf5ac0cf7 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -21,20 +21,20 @@ enum major_op { spec_op, bcond_op, j_op, jal_op, beq_op, bne_op, blez_op, bgtz_op, - addi_op, addiu_op, slti_op, sltiu_op, + addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op, andi_op, ori_op, xori_op, lui_op, cop0_op, cop1_op, cop2_op, cop1x_op, beql_op, bnel_op, blezl_op, bgtzl_op, - daddi_op, daddiu_op, ldl_op, ldr_op, + daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op, spec2_op, jalx_op, mdmx_op, spec3_op, lb_op, lh_op, lwl_op, lw_op, lbu_op, lhu_op, lwr_op, lwu_op, sb_op, sh_op, swl_op, sw_op, sdl_op, sdr_op, swr_op, cache_op, - ll_op, lwc1_op, lwc2_op, pref_op, - lld_op, ldc1_op, ldc2_op, ld_op, - sc_op, swc1_op, swc2_op, major_3b_op, - scd_op, sdc1_op, sdc2_op, sd_op + ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, + lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, + sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, + scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op }; /* @@ -83,9 +83,12 @@ enum spec3_op { swe_op = 0x1f, bshfl_op = 0x20, swle_op = 0x21, swre_op = 0x22, prefe_op = 0x23, dbshfl_op = 0x24, - lbue_op = 0x28, lhue_op = 0x29, - lbe_op = 0x2c, lhe_op = 0x2d, - lle_op = 0x2e, lwe_op = 0x2f, + cache6_op = 0x25, sc6_op = 0x26, + scd6_op = 0x27, lbue_op = 0x28, + lhue_op = 0x29, lbe_op = 0x2c, + lhe_op = 0x2d, lle_op = 0x2e, + lwe_op = 0x2f, pref6_op = 0x35, + ll6_op = 0x36, lld6_op = 0x37, rdhwr_op = 0x3b }; @@ -112,7 +115,8 @@ enum cop_op { mfhc_op = 0x03, mtc_op = 0x04, dmtc_op = 0x05, ctc_op = 0x06, mthc0_op = 
0x06, mthc_op = 0x07, - bc_op = 0x08, cop_op = 0x10, + bc_op = 0x08, bc1eqz_op = 0x09, + bc1nez_op = 0x0d, cop_op = 0x10, copm_op = 0x18 }; diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index d08f83f19db5..2cb7fdead570 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -16,13 +16,6 @@ #define HAVE_ARCH_SIGINFO_T /* - * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked - * by design ... - */ -#define HAVE_ARCH_COPY_SIGINFO -struct siginfo; - -/* * Careful to keep union _sifields from shifting ... */ #if _MIPS_SZLONG == 32 @@ -35,8 +28,9 @@ struct siginfo; #define __ARCH_SIGSYS -#include <asm-generic/siginfo.h> +#include <uapi/asm-generic/siginfo.h> +/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ typedef struct siginfo { int si_signo; int si_code; @@ -124,5 +118,6 @@ typedef struct siginfo { #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ +#include <asm-generic/siginfo.h> #endif /* _UAPI_ASM_SIGINFO_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index c454525e7695..9dd051edb411 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -140,10 +140,18 @@ static void qi_lb60_nand_ident(struct platform_device *pdev, static struct jz_nand_platform_data qi_lb60_nand_pdata = { .ident_callback = qi_lb60_nand_ident, - .busy_gpio = 94, .banks = { 1 }, }; +static struct gpiod_lookup_table qi_lb60_nand_gpio_table = { + .dev_id = "jz4740-nand.0", + .table = { + GPIO_LOOKUP("Bank C", 30, "busy", 0), + { }, + }, +}; + + /* Keyboard*/ #define KEY_QI_QI KEY_F13 @@ -472,6 +480,7 @@ static int __init qi_lb60_init_platform_devices(void) jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); + gpiod_add_lookup_table(&qi_lb60_nand_gpio_table); jz4740_serial_device_register(); diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 92987d1bbe5f..d3d2ff2d76dc 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o -obj-$(CONFIG_CPU_MIPSR2) += spram.o +obj-$(CONFIG_MIPS_SPRAM) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o @@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o +obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 3b2dfdb4865f..750d67ac41e9 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -97,6 +97,7 @@ void output_thread_info_defines(void) OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); @@ 
-381,6 +382,7 @@ void output_octeon_cop2_state_defines(void) OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); + OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); OFFSET(THREAD_CP2, task_struct, thread.cp2); OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); BLANK(); diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d7d99d601cc..c2e0f45ddf6c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -16,6 +16,7 @@ #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/inst.h> +#include <asm/mips-r2-to-r6-emul.h> #include <asm/ptrace.h> #include <asm/uaccess.h> @@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) * @returns: -EFAULT on error and forces SIGBUS, and on success * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after * evaluating the branch. + * + * MIPS R6 Compact branches and forbidden slots: + * Compact branches do not throw exceptions because they do + * not have delay slots. The forbidden slot instruction ($PC+4) + * is only executed if the branch was not taken. Otherwise the + * forbidden slot is skipped entirely. This means that the + * only possible reason to be here because of a MIPS R6 compact + * branch instruction is that the forbidden slot has thrown one. + * In that case the branch was not taken, so the EPC can be safely + * set to EPC + 8. */ int __compute_return_epc_for_insn(struct pt_regs *regs, union mips_instruction insn) { - unsigned int bit, fcr31, dspcontrol; + unsigned int bit, fcr31, dspcontrol, reg; long epc = regs->cp0_epc; int ret = 0; @@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->regs[insn.r_format.rd] = epc + 8; /* Fall through */ case jr_op: + if (NO_R6EMU && insn.r_format.func == jr_op) + goto sigill_r6; regs->cp0_epc = regs->regs[insn.r_format.rs]; break; } @@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case bcond_op: switch (insn.i_format.rt) { - case bltz_op: case bltzl_op: + if (NO_R6EMU) + goto sigill_r6; + case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bltzl_op) @@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bgez_op: case bgezl_op: + if (NO_R6EMU) + goto sigill_r6; + case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bgezl_op) @@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzal_op: case bltzall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bltzall_op)) { + ret = -SIGILL; + break; + } regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a NAL + * instruction or because we are emulating an + * old bltzal{,l} one. Lets figure out what the + * case really is. + */ + if (!insn.i_format.rs) { + /* + * NAL or BLTZAL with rs == 0 + * Doesn't matter if we are R6 or not. 
The + * result is the same + */ + regs->cp0_epc += 4 + + (insn.i_format.simmediate << 2); + break; + } + /* Now do the real thing for non-R6 BLTZAL{,L} */ if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bltzall_op) @@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezal_op: case bgezall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bgezall_op)) { + ret = -SIGILL; + break; + } regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a BAL + * instruction or because we are emulating an + * old bgezal{,l} one. Lets figure out what the + * case really is. + */ + if (!insn.i_format.rs) { + /* + * BAL or BGEZAL with rs == 0 + * Doesn't matter if we are R6 or not. The + * result is the same + */ + regs->cp0_epc += 4 + + (insn.i_format.simmediate << 2); + break; + } + /* Now do the real thing for non-R6 BGEZAL{,L} */ if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bgezall_op) @@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bposge32_op: if (!cpu_has_dsp) - goto sigill; + goto sigill_dsp; dspcontrol = rddsp(0x01); @@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* * These are conditional and in i_format. */ - case beq_op: case beql_op: + if (NO_R6EMU) + goto sigill_r6; + case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bne_op: case bnel_op: + if (NO_R6EMU) + goto sigill_r6; + case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case blez_op: /* not really i_format */ - case blezl_op: + case blezl_op: /* not really i_format */ + if (NO_R6EMU) + goto sigill_r6; + case blez_op: + /* + * Compact branches for R6 for the + * blez and blezl opcodes. + * BLEZ | rs = 0 | rt != 0 == BLEZALC + * BLEZ | rs = rt != 0 == BGEZALC + * BLEZ | rs != 0 | rt != 0 == BGEUC + * BLEZL | rs = 0 | rt != 0 == BLEZC + * BLEZL | rs = rt != 0 == BGEZC + * BLEZL | rs != 0 | rt != 0 == BGEC + * + * For real BLEZ{,L}, rt is always 0. + */ + + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; + } /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bgtz_op: case bgtzl_op: + if (NO_R6EMU) + goto sigill_r6; + case bgtz_op: + /* + * Compact branches for R6 for the + * bgtz and bgtzl opcodes. + * BGTZ | rs = 0 | rt != 0 == BGTZALC + * BGTZ | rs = rt != 0 == BLTZALC + * BGTZ | rs != 0 | rt != 0 == BLTUC + * BGTZL | rs = 0 | rt != 0 == BGTZC + * BGTZL | rs = rt != 0 == BLTZC + * BGTZL | rs != 0 | rt != 0 == BLTC + * + * *ZALC varint for BGTZ &&& rt != 0 + * For real GTZ{,L}, rt is always 0. 
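Editorial aside, not part of the patch: the table above reduces to a decode on the rs/rt fields alone, and only the two linking forms write r31, which is why the code below updates regs[31] only when rs is zero or rs equals rt. The bgtzl opcode maps the same rs/rt patterns to BGTZC/BLTZC/BLTC. The enum and function names in this sketch are illustrative, not kernel identifiers.

/* Decode of the R6 reuse of the bgtz opcode, keyed only on rs/rt. */
enum bgtz_variant {
	REAL_BGTZ,	/* rt == 0: classic delay-slot branch */
	BGTZALC,	/* rs == 0, rt != 0: compact, links r31 */
	BLTZALC,	/* rs == rt != 0: compact, links r31 */
	BLTUC,		/* rs != 0, rt != 0, rs != rt: compact, no link */
};

static enum bgtz_variant classify_bgtz(unsigned int rs, unsigned int rt)
{
	if (!rt)
		return REAL_BGTZ;
	if (!rs)
		return BGTZALC;
	if (rs == rt)
		return BLTZALC;
	return BLTUC;
}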
+ */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; + } + /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, * And now the FPA/cp1 branch instructions. */ case cop1_op: - preempt_disable(); - if (is_fpu_owner()) - fcr31 = read_32bit_cp1_register(CP1_STATUS); - else - fcr31 = current->thread.fpu.fcr31; - preempt_enable(); - - bit = (insn.i_format.rt >> 2); - bit += (bit != 0); - bit += 23; - switch (insn.i_format.rt & 3) { - case 0: /* bc1f */ - case 2: /* bc1fl */ - if (~fcr31 & (1 << bit)) { - epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == 2) - ret = BRANCH_LIKELY_TAKEN; - } else + if (cpu_has_mips_r6 && + ((insn.i_format.rs == bc1eqz_op) || + (insn.i_format.rs == bc1nez_op))) { + if (!used_math()) { /* First time FPU user */ + ret = init_fpu(); + if (ret && NO_R6EMU) { + ret = -ret; + break; + } + ret = 0; + set_used_math(); + } + lose_fpu(1); /* Save FPU state for the emulator. */ + reg = insn.i_format.rt; + bit = 0; + switch (insn.i_format.rs) { + case bc1eqz_op: + /* Test bit 0 */ + if (get_fpr32(¤t->thread.fpu.fpr[reg], 0) + & 0x1) + bit = 1; + break; + case bc1nez_op: + /* Test bit 0 */ + if (!(get_fpr32(¤t->thread.fpu.fpr[reg], 0) + & 0x1)) + bit = 1; + break; + } + own_fpu(1); + if (bit) + epc = epc + 4 + + (insn.i_format.simmediate << 2); + else epc += 8; regs->cp0_epc = epc; + break; + } else { - case 1: /* bc1t */ - case 3: /* bc1tl */ - if (fcr31 & (1 << bit)) { - epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == 3) - ret = BRANCH_LIKELY_TAKEN; - } else - epc += 8; - regs->cp0_epc = epc; + preempt_disable(); + if (is_fpu_owner()) + fcr31 = read_32bit_cp1_register(CP1_STATUS); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + bit = (insn.i_format.rt >> 2); + bit += (bit != 0); + bit += 23; + switch (insn.i_format.rt & 3) { + case 0: /* bc1f */ + case 2: /* bc1fl */ + if (~fcr31 & (1 << bit)) { + epc = epc + 4 + + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 2) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case 1: /* bc1t */ + case 3: /* bc1tl */ + if (fcr31 & (1 << bit)) { + epc = epc + 4 + + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 3) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + } break; } - break; #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) @@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, epc += 8; regs->cp0_epc = epc; break; +#else + case bc6_op: + /* Only valid for MIPS R6 */ + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + regs->cp0_epc += 8; + break; + case balc6_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BALC */ + regs->regs[31] = epc + 4; + epc += 4 + (insn.i_format.simmediate << 2); + regs->cp0_epc = epc; + break; + case beqzcjic_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BEQZC || JIC */ + regs->cp0_epc += 8; + break; + case bnezcjialc_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BNEZC || JIALC */ + if 
(insn.i_format.rs) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; #endif + case cbcond0_op: + case cbcond1_op: + /* Only valid for MIPS R6 */ + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* + * Compact branches: + * bovc, beqc, beqzalc, bnvc, bnec, bnezlac + */ + if (insn.i_format.rt && !insn.i_format.rs) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; } return ret; -sigill: +sigill_dsp: printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); force_sig(SIGBUS, current); return -EFAULT; +sigill_r6: + pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); + return -EFAULT; } EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 6acaad0480af..82bd2b278a24 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -11,7 +11,6 @@ #include <linux/percpu.h> #include <linux/smp.h> #include <linux/irq.h> -#include <linux/irqchip/mips-gic.h> #include <asm/time.h> #include <asm/cevt-r4k.h> @@ -40,7 +39,7 @@ int cp0_timer_irq_installed; irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { - const int r2 = cpu_has_mips_r2; + const int r2 = cpu_has_mips_r2_r6; struct clock_event_device *cd; int cpu = smp_processor_id(); @@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev) */ static int c0_compare_int_pending(void) { -#ifdef CONFIG_MIPS_GIC - if (gic_present) - return gic_get_timer_pending(); -#endif + /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); } diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 0384b05ab5a0..55b759a0019e 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -99,11 +99,11 @@ not_nmi: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -126,11 +126,11 @@ icache_done: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -250,7 +250,7 @@ LEAF(mips_cps_core_init) mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) - addi t7, t0, 1 + addiu t7, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f @@ -280,7 +280,7 @@ LEAF(mips_cps_core_init) mttc0 t0, CP0_TCHALT /* Next VPE */ - addi t5, t5, 1 + addiu t5, t5, 1 slt t0, t5, t7 bnez t0, 1b nop @@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes) mfc0 t1, CP0_MVPCONF0 srl t1, t1, MVPCONF0_PVPE_SHIFT andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT - addi t1, t1, 1 + addiu t1, t1, 1 /* Calculate a mask for the VPE ID from EBase.CPUNum */ clz t1, t1 @@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes) /* Next VPE */ 2: srl t6, t6, 1 - addi t5, t5, 1 + addiu t5, t5, 1 bnez t6, 1b nop diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 2d80b5f1aeae..09f4034f239f 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -244,7 +244,7 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? 
daddiwar : nowar); } -int daddiu_bug = -1; +int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; static inline void check_daddiu(void) { @@ -314,11 +314,14 @@ static inline void check_daddiu(void) void __init check_bugs64_early(void) { - check_mult_sh(); - check_daddiu(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) { + check_mult_sh(); + check_daddiu(); + } } void __init check_bugs64(void) { - check_daddi(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) + check_daddi(); } diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5342674842f5..48dfb9de853d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; + /* R6 incompatible with everything else */ + case MIPS_CPU_ISA_M64R6: + c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; + case MIPS_CPU_ISA_M32R6: + c->isa_level |= MIPS_CPU_ISA_M32R6; + /* Break here so we don't add incompatible ISAs */ + break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; case MIPS_CPU_ISA_M32R1: @@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M32R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M32R6); + break; default: goto unknown; } @@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M64R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M64R6); + break; default: goto unknown; } @@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) if (config3 & MIPS_CONF3_MSA) c->ases |= MIPS_ASE_MSA; /* Only tested on 32-bit cores */ - if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { + c->htw_seq = 0; c->options |= MIPS_CPU_HTW; + } return config3 & MIPS_CONF_M; } @@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_EVA; if (config5 & MIPS_CONF5_MRP) c->options |= MIPS_CPU_MAAR; + if (config5 & MIPS_CONF5_LLB) + c->options |= MIPS_CPU_RW_LLB; return config5 & MIPS_CONF_M; } @@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c) if (cpu_has_rixi) { /* Enable the RIXI exceptions */ - write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); + set_c0_pagegrain(PG_IEC); back_to_back_c0_hazard(); /* Verify the IEC bit is set */ if (read_c0_pagegrain() & PG_IEC) @@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c) } #ifndef CONFIG_MIPS_CPS - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->core = get_ebase_cpunum(); if (cpu_has_mipsmt) c->core >>= fls(core_nvpes()) - 1; @@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { c->writecombine = _CACHE_UNCACHED_ACCELERATED; switch (c->processor_id & PRID_IMP_MASK) { + case PRID_IMP_QEMU_GENERIC: + c->writecombine = _CACHE_UNCACHED; + c->cputype = CPU_QEMU_GENERIC; + __cpu_name[cpu] = "MIPS GENERIC QEMU"; + break; case PRID_IMP_4KC: c->cputype = CPU_4KC; c->writecombine = _CACHE_UNCACHED; @@ -1345,8 +1367,7 @@ void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { + if (c->isa_level & cpu_has_mips_r) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; if (c->fpu_id & MIPS_FPIR_FREP) @@ -1354,7 +1375,7 @@ void 
cpu_probe(void) } } - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; /* R2 has Performance Counter Interrupt indicator */ c->options |= MIPS_CPU_PCI; diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index a5b5b56485c1..d2c09f6475c5 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -11,29 +11,112 @@ #include <linux/elf.h> #include <linux/sched.h> +/* FPU modes */ enum { - FP_ERROR = -1, - FP_DOUBLE_64A = -2, + FP_FRE, + FP_FR0, + FP_FR1, }; +/** + * struct mode_req - ABI FPU mode requirements + * @single: The program being loaded needs an FPU but it will only issue + * single precision instructions meaning that it can execute in + * either FR0 or FR1. + * @soft: The soft(-float) requirement means that the program being + * loaded needs has no FPU dependency at all (i.e. it has no + * FPU instructions). + * @fr1: The program being loaded depends on FPU being in FR=1 mode. + * @frdefault: The program being loaded depends on the default FPU mode. + * That is FR0 for O32 and FR1 for N32/N64. + * @fre: The program being loaded depends on FPU with FRE=1. This mode is + * a bridge which uses FR=1 whilst still being able to maintain + * full compatibility with pre-existing code using the O32 FP32 + * ABI. + * + * More information about the FP ABIs can be found here: + * + * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up + * + */ + +struct mode_req { + bool single; + bool soft; + bool fr1; + bool frdefault; + bool fre; +}; + +static const struct mode_req fpu_reqs[] = { + [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, + [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, + [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, + [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, + [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, + [MIPS_ABI_FP_XX] = { false, false, true, true, true }, + [MIPS_ABI_FP_64] = { false, false, true, false, false }, + [MIPS_ABI_FP_64A] = { false, false, true, false, true } +}; + +/* + * Mode requirements when .MIPS.abiflags is not present in the ELF. + * Not present means that everything is acceptable except FR1. + */ +static struct mode_req none_req = { true, true, false, true, true }; + int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool is_interp, struct arch_elf_state *state) { - struct elf32_hdr *ehdr = _ehdr; - struct elf32_phdr *phdr = _phdr; + struct elf32_hdr *ehdr32 = _ehdr; + struct elf32_phdr *phdr32 = _phdr; + struct elf64_phdr *phdr64 = _phdr; struct mips_elf_abiflags_v0 abiflags; int ret; - if (config_enabled(CONFIG_64BIT) && - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) - return 0; - if (phdr->p_type != PT_MIPS_ABIFLAGS) - return 0; - if (phdr->p_filesz < sizeof(abiflags)) - return -EINVAL; + /* Lets see if this is an O32 ELF */ + if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { + /* FR = 1 for N32 */ + if (ehdr32->e_flags & EF_MIPS_ABI2) + state->overall_fp_mode = FP_FR1; + else + /* Set a good default FPU mode for O32 */ + state->overall_fp_mode = cpu_has_mips_r6 ? + FP_FRE : FP_FR0; + + if (ehdr32->e_flags & EF_MIPS_FP64) { + /* + * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. 
We will override it + * later if needed + */ + if (is_interp) + state->interp_fp_abi = MIPS_ABI_FP_OLD_64; + else + state->fp_abi = MIPS_ABI_FP_OLD_64; + } + if (phdr32->p_type != PT_MIPS_ABIFLAGS) + return 0; + + if (phdr32->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr32->p_offset, + (char *)&abiflags, + sizeof(abiflags)); + } else { + /* FR=1 is really the only option for 64-bit */ + state->overall_fp_mode = FP_FR1; + + if (phdr64->p_type != PT_MIPS_ABIFLAGS) + return 0; + if (phdr64->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr64->p_offset, + (char *)&abiflags, + sizeof(abiflags)); + } - ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, - sizeof(abiflags)); if (ret < 0) return ret; if (ret != sizeof(abiflags)) @@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, return 0; } -static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) +static inline unsigned get_fp_abi(int in_abi) { /* If the ABI requirement is provided, simply return that */ - if (in_abi != -1) + if (in_abi != MIPS_ABI_FP_UNKNOWN) return in_abi; - /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ - if (ehdr->e_flags & EF_MIPS_FP64) - return MIPS_ABI_FP_64; - - /* Default to MIPS_ABI_FP_DOUBLE */ - return MIPS_ABI_FP_DOUBLE; + /* Unknown ABI */ + return MIPS_ABI_FP_UNKNOWN; } int arch_check_elf(void *_ehdr, bool has_interpreter, struct arch_elf_state *state) { struct elf32_hdr *ehdr = _ehdr; - unsigned fp_abi, interp_fp_abi, abi0, abi1; + struct mode_req prog_req, interp_req; + int fp_abi, interp_fp_abi, abi0, abi1, max_abi; - /* Ignore non-O32 binaries */ - if (config_enabled(CONFIG_64BIT) && - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) return 0; - fp_abi = get_fp_abi(ehdr, state->fp_abi); + fp_abi = get_fp_abi(state->fp_abi); if (has_interpreter) { - interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); + interp_fp_abi = get_fp_abi(state->interp_fp_abi); abi0 = min(fp_abi, interp_fp_abi); abi1 = max(fp_abi, interp_fp_abi); @@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, abi0 = abi1 = fp_abi; } - state->overall_abi = FP_ERROR; - - if (abi0 == abi1) { - state->overall_abi = abi0; - } else if (abi0 == MIPS_ABI_FP_ANY) { - state->overall_abi = abi1; - } else if (abi0 == MIPS_ABI_FP_DOUBLE) { - switch (abi1) { - case MIPS_ABI_FP_XX: - state->overall_abi = MIPS_ABI_FP_DOUBLE; - break; - - case MIPS_ABI_FP_64A: - state->overall_abi = FP_DOUBLE_64A; - break; - } - } else if (abi0 == MIPS_ABI_FP_SINGLE || - abi0 == MIPS_ABI_FP_SOFT) { - /* Cannot link with other ABIs */ - } else if (abi0 == MIPS_ABI_FP_OLD_64) { - switch (abi1) { - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - state->overall_abi = MIPS_ABI_FP_64; - break; - } - } else if (abi0 == MIPS_ABI_FP_XX || - abi0 == MIPS_ABI_FP_64 || - abi0 == MIPS_ABI_FP_64A) { - state->overall_abi = MIPS_ABI_FP_64; - } + /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ + max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && + (!(ehdr->e_flags & EF_MIPS_ABI2))) ? 
+ MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; - switch (state->overall_abi) { - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - case FP_DOUBLE_64A: - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) - return -ELIBBAD; - break; + if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || + (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) + return -ELIBBAD; + + /* It's time to determine the FPU mode requirements */ + prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; + interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; - case FP_ERROR: + /* + * Check whether the program's and interp's ABIs have a matching FPU + * mode requirement. + */ + prog_req.single = interp_req.single && prog_req.single; + prog_req.soft = interp_req.soft && prog_req.soft; + prog_req.fr1 = interp_req.fr1 && prog_req.fr1; + prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; + prog_req.fre = interp_req.fre && prog_req.fre; + + /* + * Determine the desired FPU mode + * + * Decision making: + * + * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This + * means that we have a combination of program and interpreter + * that inherently require the hybrid FP mode. + * - If FR1 and FRDEFAULT is true, that means we hit the any-abi or + * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU + * instructions so we don't care about the mode. We will simply use + * the one preferred by the hardware. In fpxx case, that ABI can + * handle both FR=1 and FR=0, so, again, we simply choose the one + * preferred by the hardware. Next, if we only use single-precision + * FPU instructions, and the default ABI FPU mode is not good + * (ie single + any ABI combination), we set again the FPU mode to the + * one is preferred by the hardware. Next, if we know that the code + * will only use single-precision instructions, shown by single being + * true but frdefault being false, then we again set the FPU mode to + * the one that is preferred by the hardware. + * - We want FP_FR1 if that's the only matching mode and the default one + * is not good. + * - Return with -ELIBADD if we can't find a matching FPU mode. + */ + if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) + state->overall_fp_mode = FP_FRE; + else if ((prog_req.fr1 && prog_req.frdefault) || + (prog_req.single && !prog_req.frdefault)) + /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ + state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && + cpu_has_mips_r2_r6) ? + FP_FR1 : FP_FR0; + else if (prog_req.fr1) + state->overall_fp_mode = FP_FR1; + else if (!prog_req.fre && !prog_req.frdefault && + !prog_req.fr1 && !prog_req.single && !prog_req.soft) return -ELIBBAD; - } return 0; } -void mips_set_personality_fp(struct arch_elf_state *state) +static inline void set_thread_fp_mode(int hybrid, int regs32) { - if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { - /* - * Use hybrid FPRs for all code which can correctly execute - * with that mode. 
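Editorial aside, not part of the patch: a worked example of the decision ladder above as a user-space sketch. The struct and the two table rows mirror fpu_reqs[MIPS_ABI_FP_XX] and fpu_reqs[MIPS_ABI_FP_64A]; the ladder tail is simplified to the branches this particular pair can reach. An FP_XX object loaded with an FP_64A interpreter intersects to {fr1, fre}, frdefault drops out, the FRE test fails because fr1 is still set, and FP_FR1 is selected.

#include <stdbool.h>
#include <stdio.h>

struct req { bool single, soft, fr1, frdefault, fre; };

static const struct req fp_xx  = { false, false, true, true,  true };
static const struct req fp_64a = { false, false, true, false, true };

int main(void)
{
	/* AND the program's and the interpreter's requirements together. */
	struct req r = {
		.single    = fp_xx.single    && fp_64a.single,
		.soft      = fp_xx.soft      && fp_64a.soft,
		.fr1       = fp_xx.fr1       && fp_64a.fr1,
		.frdefault = fp_xx.frdefault && fp_64a.frdefault,
		.fre       = fp_xx.fre       && fp_64a.fre,
	};

	if (r.fre && !r.frdefault && !r.fr1)
		puts("FP_FRE");
	else if (r.fr1)		/* simplified tail of the ladder */
		puts("FP_FR1");	/* what the XX + 64A pair selects */
	else
		puts("default mode");
	return 0;
}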
- */ - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - /* FR=1, FRE=1 */ - clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - return; - } - } - - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - /* FR=0 */ - set_thread_flag(TIF_32BIT_FPREGS); + if (hybrid) + set_thread_flag(TIF_HYBRID_FPREGS); + else clear_thread_flag(TIF_HYBRID_FPREGS); - break; - - case FP_DOUBLE_64A: - /* FR=1, FRE=1 */ + if (regs32) + set_thread_flag(TIF_32BIT_FPREGS); + else clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - break; +} - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - /* FR=1, FRE=0 */ - clear_thread_flag(TIF_32BIT_FPREGS); - clear_thread_flag(TIF_HYBRID_FPREGS); - break; +void mips_set_personality_fp(struct arch_elf_state *state) +{ + /* + * This function is only ever called for O32 ELFs so we should + * not be worried about N32/N64 binaries. + */ - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) - set_thread_flag(TIF_32BIT_FPREGS); - else - clear_thread_flag(TIF_32BIT_FPREGS); + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + return; - clear_thread_flag(TIF_HYBRID_FPREGS); + switch (state->overall_fp_mode) { + case FP_FRE: + set_thread_fp_mode(1, 0); + break; + case FP_FR0: + set_thread_fp_mode(0, 1); + break; + case FP_FR1: + set_thread_fp_mode(0, 0); break; - default: - case FP_ERROR: BUG(); } } diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 4353d323f017..af41ba6db960 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -46,6 +46,11 @@ resume_userspace: local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR + lw k0, TI_R2_EMUL_RET($28) + bnez k0, restore_all_from_r2_emul +#endif + LONG_L a2, TI_FLAGS($28) # current->work andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) bnez t0, work_pending @@ -114,6 +119,19 @@ restore_partial: # restore partial frame RESTORE_SP_AND_RET .set at +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR +restore_all_from_r2_emul: # restore full frame + .set noat + sw zero, TI_R2_EMUL_RET($28) # reset it + RESTORE_TEMP + RESTORE_AT + RESTORE_STATIC + RESTORE_SOME + LONG_L sp, PT_R29(sp) + eretnc + .set at +#endif + work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig @@ -158,7 +176,8 @@ syscall_exit_work: jal syscall_trace_leave b resume_userspace -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ + defined(CONFIG_MIPS_MT) /* * MIPS32R2 Instruction Hazard Barrier - must be called @@ -171,4 +190,4 @@ LEAF(mips_ihb) nop END(mips_ihb) -#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ +#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a5e26dd90592..2ebaabe3af15 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -125,7 +125,7 @@ LEAF(__r4k_wait) nop nop #endif - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW wait /* end of rollback region (the region size must be power of two) */ 1: diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 0b9082b6b683..368c88b7eb6c 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -186,6 
+186,7 @@ void __init check_wait(void) case CPU_PROAPTIV: case CPU_P5600: case CPU_M5150: + case CPU_QEMU_GENERIC: cpu_wait = r4k_wait; if (read_c0_config7() & MIPS_CONF7_WII) cpu_wait = r4k_wait_irqoff; diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c new file mode 100644 index 000000000000..64d17e41093b --- /dev/null +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -0,0 +1,2378 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Imagination Technologies Ltd. + * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> + * Author: Markos Chandras <markos.chandras@imgtec.com> + * + * MIPS R2 user space instruction emulator for MIPS R6 + * + */ +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ptrace.h> +#include <linux/seq_file.h> + +#include <asm/asm.h> +#include <asm/branch.h> +#include <asm/break.h> +#include <asm/fpu.h> +#include <asm/fpu_emulator.h> +#include <asm/inst.h> +#include <asm/mips-r2-to-r6-emul.h> +#include <asm/local.h> +#include <asm/ptrace.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_64BIT +#define ADDIU "daddiu " +#define INS "dins " +#define EXT "dext " +#else +#define ADDIU "addiu " +#define INS "ins " +#define EXT "ext " +#endif /* CONFIG_64BIT */ + +#define SB "sb " +#define LB "lb " +#define LL "ll " +#define SC "sc " + +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); +DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); + +extern const unsigned int fpucondbit[8]; + +#define MIPS_R2_EMUL_TOTAL_PASS 10 + +int mipsr2_emulation = 0; + +static int __init mipsr2emu_enable(char *s) +{ + mipsr2_emulation = 1; + + pr_info("MIPS R2-to-R6 Emulator Enabled!"); + + return 1; +} +__setup("mipsr2emu", mipsr2emu_enable); + +/** + * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot + * for performance instead of the traditional way of using a stack trampoline + * which is rather slow. 
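Editorial aside, not part of the patch: the MIPSInst_* accessors used throughout this file pull fixed bit fields out of the 32-bit instruction word. Illustrative user-space equivalents for an I-type word follow; they are not the kernel's definitions.

/* Field layout of a MIPS I-type instruction: opcode | rs | rt | simmediate. */
static inline unsigned int inst_opcode(unsigned int ir)
{
	return ir >> 26;		/* bits 31..26 */
}

static inline unsigned int inst_rs(unsigned int ir)
{
	return (ir >> 21) & 0x1f;	/* bits 25..21 */
}

static inline unsigned int inst_rt(unsigned int ir)
{
	return (ir >> 16) & 0x1f;	/* bits 20..16 */
}

static inline int inst_simm(unsigned int ir)
{
	return (short)(ir & 0xffff);	/* sign-extended bits 15..0 */
}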
+ * @regs: Process register set + * @ir: Instruction + */ +static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) +{ + switch (MIPSInst_OPCODE(ir)) { + case addiu_op: + if (MIPSInst_RT(ir)) + regs->regs[MIPSInst_RT(ir)] = + (s32)regs->regs[MIPSInst_RS(ir)] + + (s32)MIPSInst_SIMM(ir); + return 0; + case daddiu_op: + if (config_enabled(CONFIG_32BIT)) + break; + + if (MIPSInst_RT(ir)) + regs->regs[MIPSInst_RT(ir)] = + (s64)regs->regs[MIPSInst_RS(ir)] + + (s64)MIPSInst_SIMM(ir); + return 0; + case lwc1_op: + case swc1_op: + case cop1_op: + case cop1x_op: + /* FPU instructions in delay slot */ + return -SIGFPE; + case spec_op: + switch (MIPSInst_FUNC(ir)) { + case or_op: + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + regs->regs[MIPSInst_RS(ir)] | + regs->regs[MIPSInst_RT(ir)]; + return 0; + case sll_op: + if (MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << + MIPSInst_FD(ir)); + return 0; + case srl_op: + if (MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> + MIPSInst_FD(ir)); + return 0; + case addu_op: + if (MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)((u32)regs->regs[MIPSInst_RS(ir)] + + (u32)regs->regs[MIPSInst_RT(ir)]); + return 0; + case subu_op: + if (MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)((u32)regs->regs[MIPSInst_RS(ir)] - + (u32)regs->regs[MIPSInst_RT(ir)]); + return 0; + case dsll_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << + MIPSInst_FD(ir)); + return 0; + case dsrl_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> + MIPSInst_FD(ir)); + return 0; + case daddu_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (u64)regs->regs[MIPSInst_RS(ir)] + + (u64)regs->regs[MIPSInst_RT(ir)]; + return 0; + case dsubu_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)((u64)regs->regs[MIPSInst_RS(ir)] - + (u64)regs->regs[MIPSInst_RT(ir)]); + return 0; + } + break; + default: + pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", + ir, MIPSInst_OPCODE(ir)); + } + + return SIGILL; +} + +/** + * movt_func - Emulate a MOVT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movf_func(struct pt_regs *regs, u32 ir) +{ + u32 csr; + u32 cond; + + csr = current->thread.fpu.fcr31; + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; + if (((csr & cond) == 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + return 0; +} + +/** + * movt_func - Emulate a MOVT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int movt_func(struct pt_regs *regs, u32 ir) +{ + u32 csr; + u32 cond; + + csr = current->thread.fpu.fcr31; + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; + + if (((csr & cond) != 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * jr_func - Emulate a JR instruction. + * @pt_regs: Process register set + * @ir: Instruction + * + * Returns SIGILL if JR was in delay slot, SIGEMT if we + * can't compute the EPC, SIGSEGV if we can't access the + * userland instruction or 0 on success. + */ +static int jr_func(struct pt_regs *regs, u32 ir) +{ + int err; + unsigned long cepc, epc, nepc; + u32 nir; + + if (delay_slot(regs)) + return SIGILL; + + /* EPC after the RI/JR instruction */ + nepc = regs->cp0_epc; + /* Roll back to the reserved R2 JR instruction */ + regs->cp0_epc -= 4; + epc = regs->cp0_epc; + err = __compute_return_epc(regs); + + if (err < 0) + return SIGEMT; + + + /* Computed EPC */ + cepc = regs->cp0_epc; + + /* Get DS instruction */ + err = __get_user(nir, (u32 __user *)nepc); + if (err) + return SIGSEGV; + + MIPS_R2BR_STATS(jrs); + + /* If nir == 0(NOP), then nothing else to do */ + if (nir) { + /* + * Negative err means FPU instruction in BD-slot, + * Zero err means 'BD-slot emulation done' + * For anything else we go back to trampoline emulation. + */ + err = mipsr6_emul(regs, nir); + if (err > 0) { + regs->cp0_epc = nepc; + err = mips_dsemul(regs, nir, cepc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + + return err; +} + +/** + * movz_func - Emulate a MOVZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movz_func(struct pt_regs *regs, u32 ir) +{ + if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * movn_func - Emulate a MOVZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movn_func(struct pt_regs *regs, u32 ir) +{ + if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * mfhi_func - Emulate a MFHI instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mfhi_func(struct pt_regs *regs, u32 ir) +{ + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->hi; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mthi_func - Emulate a MTHI instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mthi_func(struct pt_regs *regs, u32 ir) +{ + regs->hi = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mflo_func - Emulate a MFLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mflo_func(struct pt_regs *regs, u32 ir) +{ + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->lo; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mtlo_func - Emulate a MTLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int mtlo_func(struct pt_regs *regs, u32 ir) +{ + regs->lo = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mult_func - Emulate a MULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + + rs = res; + regs->lo = (s64)rs; + rt = res >> 32; + res = (s64)rt; + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * multu_func - Emulate a MULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int multu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = res; + regs->lo = (s64)rt; + regs->hi = (s64)(res >> 32); + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * div_func - Emulate a DIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int div_func(struct pt_regs *regs, u32 ir) +{ + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * divu_func - Emulate a DIVU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int divu_func(struct pt_regs *regs, u32 ir) +{ + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * dmult_func - Emulate a DMULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int dmult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuh %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * dmultu_func - Emulate a DMULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int dmultu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuhu %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * ddiv_func - Emulate a DDIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. 
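/*
 * Stand-alone sketch (emul_mult is an assumed name, not kernel code) of the
 * HI/LO split performed by mult_func() above: the full 64-bit signed product
 * is computed once, then each 32-bit half is written back sign-extended, as
 * the architectural MULT would leave it in a 64-bit HI/LO pair.
 */
#include <stdint.h>

static void emul_mult(int32_t rs, int32_t rt, uint64_t *lo, uint64_t *hi)
{
	int64_t prod = (int64_t)rs * (int64_t)rt;

	*lo = (uint64_t)(int64_t)(int32_t)prod;		/* low 32 bits, sign-extended */
	*hi = (uint64_t)(int64_t)(int32_t)(prod >> 32);	/* high 32 bits, sign-extended */
}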
+ */ +static int ddiv_func(struct pt_regs *regs, u32 ir) +{ + s64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = rs / rt; + regs->hi = rs % rt; + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * ddivu_func - Emulate a DDIVU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int ddivu_func(struct pt_regs *regs, u32 ir) +{ + u64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = rs / rt; + regs->hi = rs % rt; + + MIPS_R2_STATS(divs); + + return 0; +} + +/* R6 removed instructions for the SPECIAL opcode */ +static struct r2_decoder_table spec_op_table[] = { + { 0xfc1ff83f, 0x00000008, jr_func }, + { 0xfc00ffff, 0x00000018, mult_func }, + { 0xfc00ffff, 0x00000019, multu_func }, + { 0xfc00ffff, 0x0000001c, dmult_func }, + { 0xfc00ffff, 0x0000001d, dmultu_func }, + { 0xffff07ff, 0x00000010, mfhi_func }, + { 0xfc1fffff, 0x00000011, mthi_func }, + { 0xffff07ff, 0x00000012, mflo_func }, + { 0xfc1fffff, 0x00000013, mtlo_func }, + { 0xfc0307ff, 0x00000001, movf_func }, + { 0xfc0307ff, 0x00010001, movt_func }, + { 0xfc0007ff, 0x0000000a, movz_func }, + { 0xfc0007ff, 0x0000000b, movn_func }, + { 0xfc00ffff, 0x0000001a, div_func }, + { 0xfc00ffff, 0x0000001b, divu_func }, + { 0xfc00ffff, 0x0000001e, ddiv_func }, + { 0xfc00ffff, 0x0000001f, ddivu_func }, + {} +}; + +/** + * madd_func - Emulate a MADD instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int madd_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + rt = regs->hi; + rs = regs->lo; + res += ((((s64)rt) << 32) | (u32)rs); + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * maddu_func - Emulate a MADDU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int maddu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = regs->hi; + rs = regs->lo; + res += ((((s64)rt) << 32) | (u32)rs); + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * msub_func - Emulate a MSUB instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int msub_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + rt = regs->hi; + rs = regs->lo; + res = ((((s64)rt) << 32) | (u32)rs) - res; + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * msubu_func - Emulate a MSUBU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int msubu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = regs->hi; + rs = regs->lo; + res = ((((s64)rt) << 32) | (u32)rs) - res; + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * mul_func - Emulate a MUL instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mul_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + if (!MIPSInst_RD(ir)) + return 0; + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + + rs = res; + regs->regs[MIPSInst_RD(ir)] = (s64)rs; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * clz_func - Emulate a CLZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int clz_func(struct pt_regs *regs, u32 ir) +{ + u32 res; + u32 rs; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * clo_func - Emulate a CLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ + +static int clo_func(struct pt_regs *regs, u32 ir) +{ + u32 res; + u32 rs; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * dclz_func - Emulate a DCLZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int dclz_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * dclo_func - Emulate a DCLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int dclo_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/* R6 removed instructions for the SPECIAL2 opcode */ +static struct r2_decoder_table spec2_op_table[] = { + { 0xfc00ffff, 0x70000000, madd_func }, + { 0xfc00ffff, 0x70000001, maddu_func }, + { 0xfc0007ff, 0x70000002, mul_func }, + { 0xfc00ffff, 0x70000004, msub_func }, + { 0xfc00ffff, 0x70000005, msubu_func }, + { 0xfc0007ff, 0x70000020, clz_func }, + { 0xfc0007ff, 0x70000021, clo_func }, + { 0xfc0007ff, 0x70000024, dclz_func }, + { 0xfc0007ff, 0x70000025, dclo_func }, + { } +}; + +static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, + struct r2_decoder_table *table) +{ + struct r2_decoder_table *p; + int err; + + for (p = table; p->func; p++) { + if ((inst & p->mask) == p->code) { + err = (p->func)(regs, inst); + return err; + } + } + return SIGILL; +} + +/** + * mipsr2_decoder: Decode and emulate a MIPS R2 instruction + * @regs: Process register set + * @inst: Instruction to decode and emulate + */ +int mipsr2_decoder(struct pt_regs *regs, u32 inst) +{ + int err = 0; + unsigned long vaddr; + u32 nir; + unsigned long cpc, epc, nepc, r31, res, rs, rt; + + void __user *fault_addr = NULL; + int pass = 0; + +repeat: + r31 = regs->regs[31]; + epc = regs->cp0_epc; + err = compute_return_epc(regs); + if (err < 0) { + BUG(); + return SIGEMT; + } + pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", + inst, epc, pass); + + switch (MIPSInst_OPCODE(inst)) { + case spec_op: + err = mipsr2_find_op_func(regs, inst, spec_op_table); + if (err < 0) { + /* FPU instruction under JR */ + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + break; + case spec2_op: + err = mipsr2_find_op_func(regs, inst, spec2_op_table); + break; + case bcond_op: + rt = MIPSInst_RT(inst); + rs = MIPSInst_RS(inst); + switch (rt) { + case tgei_op: + if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TGEI"); + + MIPS_R2_STATS(traps); + + break; + case tgeiu_op: + if (regs->regs[rs] >= MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TGEIU"); + + MIPS_R2_STATS(traps); + + break; + case tlti_op: + if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TLTI"); + + MIPS_R2_STATS(traps); + + break; + case tltiu_op: + if (regs->regs[rs] < MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TLTIU"); + + MIPS_R2_STATS(traps); + + break; + case teqi_op: + if (regs->regs[rs] == MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TEQI"); + + MIPS_R2_STATS(traps); + + break; + case tnei_op: + if (regs->regs[rs] != MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TNEI"); + + MIPS_R2_STATS(traps); + + break; + case bltzl_op: + case bgezl_op: + case bltzall_op: + case bgezall_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + if (err != BRANCH_LIKELY_TAKEN) + break; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (rt) { + case bltzl_op: + MIPS_R2BR_STATS(bltzl); + break; + case bgezl_op: + MIPS_R2BR_STATS(bgezl); + break; + case 
bltzall_op: + MIPS_R2BR_STATS(bltzall); + break; + case bgezall_op: + MIPS_R2BR_STATS(bgezall); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + case bltzal_op: + case bgezal_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (rt) { + case bltzal_op: + MIPS_R2BR_STATS(bltzal); + break; + case bgezal_op: + MIPS_R2BR_STATS(bgezal); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + default: + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = SIGILL; + break; + } + break; + + case beql_op: + case bnel_op: + case blezl_op: + case bgtzl_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + if (err != BRANCH_LIKELY_TAKEN) + break; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (MIPSInst_OPCODE(inst)) { + case beql_op: + MIPS_R2BR_STATS(beql); + break; + case bnel_op: + MIPS_R2BR_STATS(bnel); + break; + case blezl_op: + MIPS_R2BR_STATS(blezl); + break; + case bgtzl_op: + MIPS_R2BR_STATS(bgtzl); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + case lwc1_op: + case swc1_op: + case cop1_op: + case cop1x_op: +fpu_emul: + regs->regs[31] = r31; + regs->cp0_epc = epc; + if (!used_math()) { /* First time FPU user. */ + err = init_fpu(); + set_used_math(); + } + lose_fpu(1); /* Save FPU state for the emulator. */ + + err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, + &fault_addr); + + /* + * this is a tricky issue - lose_fpu() uses LL/SC atomics + * if FPU is owned and effectively cancels user level LL/SC. + * So, it could be logical to don't restore FPU ownership here. + * But the sequence of multiple FPU instructions is much much + * more often than LL-FPU-SC and I prefer loop here until + * next scheduler cycle cancels FPU ownership + */ + own_fpu(1); /* Restore FPU state. 
*/ + + if (err) + current->thread.cp0_baduaddr = (unsigned long)fault_addr; + + MIPS_R2_STATS(fpus); + + break; + + case lwl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9: sll %0, %0, 0\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case lwr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case swl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if 
(!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case swr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case ldl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 
8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case ldr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " andi %1, %2, 0x7\n" + " 
beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case sdl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* 
CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + break; + + case sdr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + case ll_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. 
So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "ll %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + MIPS_R2_STATS(llsc); + + break; + + case sc_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "sc %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case lld_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. 
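/*
 * The kind of userland sequence the Config5/LLB comments above are about
 * (an illustrative sketch, not part of the patch): if taking the RI trap
 * for the LL cleared the LLAddr/LLB bit, the SC below could never succeed
 * and the retry loop would spin forever, which is why the emulation bails
 * out with SIGKILL when ERETNC/LLB support is absent.
 */
static inline void llsc_atomic_add(volatile int *p, int v)
{
	int tmp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		# load-linked\n"
	"	addu	%0, %0, %2	# modify\n"
	"	sc	%0, %1		# store-conditional\n"
	"	beqz	%0, 1b		# retry if SC failed\n"
	: "=&r" (tmp), "+m" (*p)
	: "r" (v)
	: "memory");
}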
Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "lld %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case scd_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "scd %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + case pref_op: + /* skip it */ + break; + default: + err = SIGILL; + } + + /* + * Lets not return to userland just yet. 
It's constly and + * it's likely we have more R2 instructions to emulate + */ + if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { + regs->cp0_cause &= ~CAUSEF_BD; + err = get_user(inst, (u32 __user *)regs->cp0_epc); + if (!err) + goto repeat; + + if (err < 0) + err = SIGSEGV; + } + + if (err && (err != SIGEMT)) { + regs->regs[31] = r31; + regs->cp0_epc = epc; + } + + /* Likely a MIPS R6 compatible instruction */ + if (pass && (err == SIGILL)) + err = 0; + + return err; +} + +#ifdef CONFIG_DEBUG_FS + +static int mipsr2_stats_show(struct seq_file *s, void *unused) +{ + + seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); + seq_printf(s, "movs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.movs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); + seq_printf(s, "hilo\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.hilo), + (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); + seq_printf(s, "muls\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.muls), + (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); + seq_printf(s, "divs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.divs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); + seq_printf(s, "dsps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); + seq_printf(s, "bops\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.bops), + (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); + seq_printf(s, "traps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.traps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); + seq_printf(s, "fpus\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.fpus), + (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); + seq_printf(s, "loads\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.loads), + (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); + seq_printf(s, "stores\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.stores), + (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); + seq_printf(s, "llsc\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.llsc), + (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); + seq_printf(s, "dsemul\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); + seq_printf(s, "jr\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); + seq_printf(s, "bltzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); + seq_printf(s, "bgezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); + seq_printf(s, "bltzll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); + seq_printf(s, "bgezll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); + seq_printf(s, "bltzal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); + seq_printf(s, "bgezal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); + seq_printf(s, "beql\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); + seq_printf(s, "bnel\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); + seq_printf(s, "blezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); + seq_printf(s, "bgtzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); + + return 0; +} + +static int 
mipsr2_stats_clear_show(struct seq_file *s, void *unused) +{ + mipsr2_stats_show(s, unused); + + __this_cpu_write((mipsr2emustats).movs, 0); + __this_cpu_write((mipsr2bdemustats).movs, 0); + __this_cpu_write((mipsr2emustats).hilo, 0); + __this_cpu_write((mipsr2bdemustats).hilo, 0); + __this_cpu_write((mipsr2emustats).muls, 0); + __this_cpu_write((mipsr2bdemustats).muls, 0); + __this_cpu_write((mipsr2emustats).divs, 0); + __this_cpu_write((mipsr2bdemustats).divs, 0); + __this_cpu_write((mipsr2emustats).dsps, 0); + __this_cpu_write((mipsr2bdemustats).dsps, 0); + __this_cpu_write((mipsr2emustats).bops, 0); + __this_cpu_write((mipsr2bdemustats).bops, 0); + __this_cpu_write((mipsr2emustats).traps, 0); + __this_cpu_write((mipsr2bdemustats).traps, 0); + __this_cpu_write((mipsr2emustats).fpus, 0); + __this_cpu_write((mipsr2bdemustats).fpus, 0); + __this_cpu_write((mipsr2emustats).loads, 0); + __this_cpu_write((mipsr2bdemustats).loads, 0); + __this_cpu_write((mipsr2emustats).stores, 0); + __this_cpu_write((mipsr2bdemustats).stores, 0); + __this_cpu_write((mipsr2emustats).llsc, 0); + __this_cpu_write((mipsr2bdemustats).llsc, 0); + __this_cpu_write((mipsr2emustats).dsemul, 0); + __this_cpu_write((mipsr2bdemustats).dsemul, 0); + __this_cpu_write((mipsr2bremustats).jrs, 0); + __this_cpu_write((mipsr2bremustats).bltzl, 0); + __this_cpu_write((mipsr2bremustats).bgezl, 0); + __this_cpu_write((mipsr2bremustats).bltzll, 0); + __this_cpu_write((mipsr2bremustats).bgezll, 0); + __this_cpu_write((mipsr2bremustats).bltzal, 0); + __this_cpu_write((mipsr2bremustats).bgezal, 0); + __this_cpu_write((mipsr2bremustats).beql, 0); + __this_cpu_write((mipsr2bremustats).bnel, 0); + __this_cpu_write((mipsr2bremustats).blezl, 0); + __this_cpu_write((mipsr2bremustats).bgtzl, 0); + + return 0; +} + +static int mipsr2_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_show, inode->i_private); +} + +static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_clear_show, inode->i_private); +} + +static const struct file_operations mipsr2_emul_fops = { + .open = mipsr2_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations mipsr2_clear_fops = { + .open = mipsr2_stats_clear_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +static int __init mipsr2_init_debugfs(void) +{ + extern struct dentry *mips_debugfs_dir; + struct dentry *mipsr2_emul; + + if (!mips_debugfs_dir) + return -ENODEV; + + mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_emul_fops); + if (!mipsr2_emul) + return -ENOMEM; + + mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_clear_fops); + if (!mipsr2_emul) + return -ENOMEM; + + return 0; +} + +device_initcall(mipsr2_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 17eaf0cf760c..291af0b5c482 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -14,6 +14,8 @@ #include <linux/mm.h> #include <asm/uaccess.h> #include <asm/ftrace.h> +#include <asm/fpu.h> +#include <asm/msa.h> extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_kernel_nocheck_asm(char *__to, @@ -32,6 +34,14 @@ extern long __strnlen_user_nocheck_asm(const char *s); extern long __strnlen_user_asm(const char *s); /* + * Core 
architecture code + */ +EXPORT_SYMBOL_GPL(_save_fp); +#ifdef CONFIG_CPU_HAS_MSA +EXPORT_SYMBOL_GPL(_save_msa); +#endif + +/* * String functions */ EXPORT_SYMBOL(memset); @@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm); EXPORT_SYMBOL(__strnlen_user_nocheck_asm); EXPORT_SYMBOL(__strnlen_user_asm); +#ifndef CONFIG_CPU_MIPSR6 EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_partial_copy_kernel); EXPORT_SYMBOL(__csum_partial_copy_to_user); EXPORT_SYMBOL(__csum_partial_copy_from_user); +#endif EXPORT_SYMBOL(invalid_pte_table); #ifdef CONFIG_FUNCTION_TRACER diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index f6547680c81c..423ae83af1fb 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -31,15 +31,11 @@ /* * check if we need to save FPU registers */ - PTR_L t3, TASK_THREAD_INFO(a0) - LONG_L t0, TI_FLAGS(t3) - li t1, _TIF_USEDFPU - and t2, t0, t1 - beqz t2, 1f - nor t1, zero, t1 - - and t0, t0, t1 - LONG_S t0, TI_FLAGS(t3) + .set push + .set noreorder + beqz a3, 1f + PTR_L t3, TASK_THREAD_INFO(a0) + .set pop /* * clear saved user stack CU1 bit @@ -56,36 +52,9 @@ .set pop 1: - /* check if we need to save COP2 registers */ - PTR_L t2, TASK_THREAD_INFO(a0) - LONG_L t0, ST_OFF(t2) - bbit0 t0, 30, 1f - - /* Disable COP2 in the stored process state */ - li t1, ST0_CU2 - xor t0, t1 - LONG_S t0, ST_OFF(t2) - - /* Enable COP2 so we can save it */ - mfc0 t0, CP0_STATUS - or t0, t1 - mtc0 t0, CP0_STATUS - - /* Save COP2 */ - daddu a0, THREAD_CP2 - jal octeon_cop2_save - dsubu a0, THREAD_CP2 - - /* Disable COP2 now that we are done */ - mfc0 t0, CP0_STATUS - li t1, ST0_CU2 - xor t0, t1 - mtc0 t0, CP0_STATUS - -1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? */ /* Store the CVMSEG state */ @@ -109,9 +78,9 @@ .set reorder /* Disable access to CVMSEG */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ - mtc0 t0, $11,7 /* CvmMemCtl */ + dmtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: @@ -147,6 +116,8 @@ * void octeon_cop2_save(struct octeon_cop2_state *a0) */ .align 7 + .set push + .set noreorder LEAF(octeon_cop2_save) dmfc0 t9, $9,7 /* CvmCtl register. 
*/ @@ -157,17 +128,17 @@ dmfc2 t2, 0x0200 sd t0, OCTEON_CP2_CRC_IV(a0) sd t1, OCTEON_CP2_CRC_LENGTH(a0) - sd t2, OCTEON_CP2_CRC_POLY(a0) /* Skip next instructions if CvmCtl[NODFA_CP2] set */ bbit1 t9, 28, 1f + sd t2, OCTEON_CP2_CRC_POLY(a0) /* Save the LLM state */ dmfc2 t0, 0x0402 dmfc2 t1, 0x040A sd t0, OCTEON_CP2_LLM_DAT(a0) - sd t1, OCTEON_CP2_LLM_DAT+8(a0) 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ + sd t1, OCTEON_CP2_LLM_DAT+8(a0) /* Save the COP2 crypto state */ /* this part is mostly common to both pass 1 and later revisions */ @@ -198,18 +169,20 @@ sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) - mfc0 t3, $15,0 /* Get the processor ID register */ + mfc0 v0, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) - sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* Skip to the Pass1 version of the remainder of the COP2 state */ - beq t3, t0, 2f + beq v0, v1, 2f + sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 + ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ dmfc2 t3, 0x0242 + subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ dmfc2 t0, 0x0243 sd t1, OCTEON_CP2_HSH_DATW(a0) dmfc2 t1, 0x0244 @@ -262,8 +235,16 @@ sd t1, OCTEON_CP2_GFM_MULT+8(a0) sd t2, OCTEON_CP2_GFM_POLY(a0) sd t3, OCTEON_CP2_GFM_RESULT(a0) - sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + bltz v1, 4f + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + /* OCTEON III things*/ + dmfc2 t0, 0x024F + dmfc2 t1, 0x0050 + sd t0, OCTEON_CP2_SHA3(a0) + sd t1, OCTEON_CP2_SHA3+8(a0) +4: jr ra + nop 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 @@ -289,7 +270,9 @@ 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra + nop END(octeon_cop2_save) + .set pop /* * void octeon_cop2_restore(struct octeon_cop2_state *a0) @@ -354,9 +337,9 @@ ld t2, OCTEON_CP2_AES_RESULT+8(a0) mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 - bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ + bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 /* this code is specific for pass 1 */ @@ -384,6 +367,7 @@ 3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) + ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t2, 0x0240 @@ -437,9 +421,15 @@ dmtc2 t2, 0x0259 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) dmtc2 t0, 0x025E + subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ dmtc2 t1, 0x025A - dmtc2 t2, 0x025B - + bltz v0, done_restore + dmtc2 t2, 0x025B + /* OCTEON III things*/ + ld t0, OCTEON_CP2_SHA3(a0) + ld t1, OCTEON_CP2_SHA3+8(a0) + dmtc2 t0, 0x0051 + dmtc2 t1, 0x0050 done_restore: jr ra nop @@ -450,18 +440,23 @@ done_restore: * void octeon_mult_save() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in SAVE_SOME in stackframe.h. It can only - * safely modify k0 and k1. + * NOTE: This is called in SAVE_TEMP in stackframe.h. It can + * safely modify v1,k0, k1,$10-$15, and $24. It will + * be overwritten with a processor specific version of the code. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_save) - dmfc0 k0, $9,7 /* CvmCtl register. 
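/*
 * C-level view (a sketch; is_octeon3_or_later is an assumed helper) of the
 * PrId test the COP2 save/restore paths above use to decide whether the
 * OCTEON III SHA3 state exists: the code builds the lowest OCTEON III PrId
 * (0x000d0000 | 0x9500), subtracts it from the running CPU's PrId and
 * branches on the sign, so only OCTEON III and later parts take the SHA3
 * save/restore path.
 */
#include <stdint.h>
#include <stdbool.h>

static bool is_octeon3_or_later(uint32_t prid)
{
	const uint32_t octeon3_base = 0x000d9500;	/* lowest OCTEON III PrId used above */

	return (int32_t)(prid - octeon3_base) >= 0;
}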
*/ - bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ + jr ra nop + .space 30 * 4, 0 +octeon_mult_save_end: + EXPORT(octeon_mult_save_end) + END(octeon_mult_save) - /* Save the multiplier state */ + LEAF(octeon_mult_save2) + /* Save the multiplier state OCTEON II and earlier*/ v3mulu k0, $0, $0 v3mulu k1, $0, $0 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ @@ -476,44 +471,107 @@ done_restore: sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ - -1: /* Resume here if CvmCtl[NOMUL] */ +octeon_mult_save2_end: + EXPORT(octeon_mult_save2_end) + END(octeon_mult_save2) + + LEAF(octeon_mult_save3) + /* Save the multiplier state OCTEON III */ + v3mulu $10, $0, $0 /* read P0 */ + v3mulu $11, $0, $0 /* read P1 */ + v3mulu $12, $0, $0 /* read P2 */ + sd $10, PT_MTP+(0*8)(sp) /* store P0 */ + v3mulu $10, $0, $0 /* read P3 */ + sd $11, PT_MTP+(1*8)(sp) /* store P1 */ + v3mulu $11, $0, $0 /* read P4 */ + sd $12, PT_MTP+(2*8)(sp) /* store P2 */ + ori $13, $0, 1 + v3mulu $12, $0, $0 /* read P5 */ + sd $10, PT_MTP+(3*8)(sp) /* store P3 */ + v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ + sd $11, PT_MTP+(4*8)(sp) /* store P4 */ + v3mulu $10, $0, $0 /* read MPL1 */ + sd $12, PT_MTP+(5*8)(sp) /* store P5 */ + v3mulu $11, $0, $0 /* read MPL2 */ + sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ + v3mulu $12, $0, $0 /* read MPL3 */ + sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ + v3mulu $10, $0, $0 /* read MPL4 */ + sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ + v3mulu $11, $0, $0 /* read MPL5 */ + sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ + sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra - END(octeon_mult_save) + sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ +octeon_mult_save3_end: + EXPORT(octeon_mult_save3_end) + END(octeon_mult_save3) .set pop /* * void octeon_mult_restore() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in RESTORE_SOME in stackframe.h. + * NOTE: This is called in RESTORE_TEMP in stackframe.h. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_restore) - dmfc0 k1, $9,7 /* CvmCtl register. 
*/ - ld v0, PT_MPL(sp) /* MPL0 */ - ld v1, PT_MPL+8(sp) /* MPL1 */ - ld k0, PT_MPL+16(sp) /* MPL2 */ - bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ - /* Normally falls through, so no time wasted here */ - nop + jr ra + nop + .space 30 * 4, 0 +octeon_mult_restore_end: + EXPORT(octeon_mult_restore_end) + END(octeon_mult_restore) + LEAF(octeon_mult_restore2) + ld v0, PT_MPL(sp) /* MPL0 */ + ld v1, PT_MPL+8(sp) /* MPL1 */ + ld k0, PT_MPL+16(sp) /* MPL2 */ /* Restore the multiplier state */ - ld k1, PT_MTP+16(sp) /* P2 */ - MTM0 v0 /* MPL0 */ + ld k1, PT_MTP+16(sp) /* P2 */ + mtm0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ - MTM1 v1 /* MPL1 */ - ld v1, PT_MTP(sp) /* P0 */ - MTM2 k0 /* MPL2 */ - MTP2 k1 /* P2 */ - MTP1 v0 /* P1 */ + mtm1 v1 /* MPL1 */ + ld v1, PT_MTP(sp) /* P0 */ + mtm2 k0 /* MPL2 */ + mtp2 k1 /* P2 */ + mtp1 v0 /* P1 */ jr ra - MTP0 v1 /* P0 */ - -1: /* Resume here if CvmCtl[NOMUL] */ + mtp0 v1 /* P0 */ +octeon_mult_restore2_end: + EXPORT(octeon_mult_restore2_end) + END(octeon_mult_restore2) + + LEAF(octeon_mult_restore3) + ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ + ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ + ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ + ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ + .word 0x718d0008 + /* mtm0 $12, $13 restore MPL0 and MPL3 */ + ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ + .word 0x714b000c + /* mtm1 $10, $11 restore MPL1 and MPL4 */ + ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ + ld $10, PT_MTP+(0*8)(sp) /* read P0 */ + ld $11, PT_MTP+(3*8)(sp) /* read P3 */ + .word 0x718d000d + /* mtm2 $12, $13 restore MPL2 and MPL5 */ + ld $12, PT_MTP+(1*8)(sp) /* read P1 */ + .word 0x714b0009 + /* mtp0 $10, $11 restore P0 and P3 */ + ld $13, PT_MTP+(4*8)(sp) /* read P4 */ + ld $10, PT_MTP+(2*8)(sp) /* read P2 */ + ld $11, PT_MTP+(5*8)(sp) /* read P5 */ + .word 0x718d000a + /* mtp1 $12, $13 restore P1 and P4 */ jr ra - nop - END(octeon_mult_restore) + .word 0x714b000b + /* mtp2 $10, $11 restore P2 and P5 */ + +octeon_mult_restore3_end: + EXPORT(octeon_mult_restore3_end) + END(octeon_mult_restore3) .set pop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 097fc8d14e42..130af7d26a9c 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "]\n"); } - seq_printf(m, "isa\t\t\t: mips1"); + seq_printf(m, "isa\t\t\t:"); + if (cpu_has_mips_r1) + seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) @@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "%s", " mips32r1"); if (cpu_has_mips32r2) seq_printf(m, "%s", " mips32r2"); + if (cpu_has_mips32r6) + seq_printf(m, "%s", " mips32r6"); if (cpu_has_mips64r1) seq_printf(m, "%s", " mips64r1"); if (cpu_has_mips64r2) seq_printf(m, "%s", " mips64r2"); + if (cpu_has_mips64r6) + seq_printf(m, "%s", " mips64r6"); seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 85bff5d513e5..bf85cc180d91 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -25,6 +25,7 @@ #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/random.h> +#include <linux/prctl.h> #include <asm/asm.h> #include <asm/bootinfo.h> @@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self) { smp_call_function(arch_dump_stack, NULL, 1); } + +int mips_get_process_fp_mode(struct task_struct *task) +{ + int value = 0; + + if (!test_tsk_thread_flag(task, 
TIF_32BIT_FPREGS)) + value |= PR_FP_MODE_FR; + if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) + value |= PR_FP_MODE_FRE; + + return value; +} + +int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) +{ + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; + unsigned long switch_count; + struct task_struct *t; + + /* Check the value is valid */ + if (value & ~known_bits) + return -EOPNOTSUPP; + + /* Avoid inadvertently triggering emulation */ + if ((value & PR_FP_MODE_FR) && cpu_has_fpu && + !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + return -EOPNOTSUPP; + if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + return -EOPNOTSUPP; + + /* FR = 0 not supported in MIPS R6 */ + if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + return -EOPNOTSUPP; + + /* Save FP & vector context, then disable FPU & MSA */ + if (task->signal == current->signal) + lose_fpu(1); + + /* Prevent any threads from obtaining live FP context */ + atomic_set(&task->mm->context.fp_mode_switching, 1); + smp_mb__after_atomic(); + + /* + * If there are multiple online CPUs then wait until all threads whose + * FP mode is about to change have been context switched. This approach + * allows us to only worry about whether an FP mode switch is in + * progress when FP is first used in a tasks time slice. Pretty much all + * of the mode switch overhead can thus be confined to cases where mode + * switches are actually occuring. That is, to here. However for the + * thread performing the mode switch it may take a while... + */ + if (num_online_cpus() > 1) { + spin_lock_irq(&task->sighand->siglock); + + for_each_thread(task, t) { + if (t == current) + continue; + + switch_count = t->nvcsw + t->nivcsw; + + do { + spin_unlock_irq(&task->sighand->siglock); + cond_resched(); + spin_lock_irq(&task->sighand->siglock); + } while ((t->nvcsw + t->nivcsw) == switch_count); + } + + spin_unlock_irq(&task->sighand->siglock); + } + + /* + * There are now no threads of the process with live FP context, so it + * is safe to proceed with the FP mode switch. 
+ */ + for_each_thread(task, t) { + /* Update desired FP register width */ + if (value & PR_FP_MODE_FR) { + clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); + } else { + set_tsk_thread_flag(t, TIF_32BIT_FPREGS); + clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); + } + + /* Update desired FP single layout */ + if (value & PR_FP_MODE_FRE) + set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + else + clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + } + + /* Allow threads to use FP again */ + atomic_set(&task->mm->context.fp_mode_switching, 0); + + return 0; +} diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 6c160c67984c..676c5030a953 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -34,7 +34,7 @@ .endm .set noreorder - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW LEAF(_save_fp_context) .set push @@ -42,7 +42,8 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -105,10 +106,12 @@ LEAF(_save_fp_context32) SET_HARDFLOAT cfc1 t1, fcr31 +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 nop +#endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, SC32_FPREGS+8(a0) @@ -163,7 +166,8 @@ LEAF(_save_fp_context32) LEAF(_restore_fp_context) EX lw t1, SC_FPC_CSR(a0) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -223,10 +227,12 @@ LEAF(_restore_fp_context32) SET_HARDFLOAT EX lw t1, SC32_FPC_CSR(a0) +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 nop +#endif EX ldc1 $f1, SC32_FPREGS+8(a0) EX ldc1 $f3, SC32_FPREGS+24(a0) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 64591e671878..3b1a36f13a7d 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -115,7 +115,8 @@ * Save a thread's fp context. */ LEAF(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -126,7 +127,8 @@ LEAF(_save_fp) * Restore a thread's fp context. */ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -240,9 +242,9 @@ LEAF(_init_fpu) mtc1 t1, $f30 mtc1 t1, $f31 -#ifdef CONFIG_CPU_MIPS32_R2 +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 sll t0, t0, 5 # is Status.FR set? 
bgez t0, 1f # no: skip setting upper 32b @@ -280,9 +282,9 @@ LEAF(_init_fpu) mthc1 t1, $f30 mthc1 t1, $f31 1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 */ +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ #else - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW dmtc1 t1, $f0 dmtc1 t1, $f2 dmtc1 t1, $f4 diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 67f2495def1c..d1168d7c31e8 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -208,6 +208,7 @@ void spram_config(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: config0 = read_c0_config(); /* FIXME: addresses are Malta specific */ if (config0 & (1<<24)) { diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 604b558809c4..53a7ef9a8f32 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__ ( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c3b41e24c05a..33984c04b60b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -46,6 +46,7 @@ #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/idle.h> +#include <asm/mips-r2-to-r6-emul.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/module.h> @@ -837,7 +838,7 @@ out: exception_exit(prev_state); } -static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { siginfo_t info; @@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int opcode = 0; int status = -1; + /* + * Avoid any kernel code. Just emulate the R2 instruction + * as quickly as possible. + */ + if (mipsr2_emulation && cpu_has_mips_r6 && + likely(user_mode(regs))) { + if (likely(get_user(opcode, epc) >= 0)) { + status = mipsr2_decoder(regs, opcode); + switch (status) { + case 0: + case SIGEMT: + task_thread_info(current)->r2_emul_return = 1; + return; + case SIGILL: + goto no_r2_instr; + default: + process_fpemu_return(status, + ¤t->thread.cp0_baduaddr); + task_thread_info(current)->r2_emul_return = 1; + return; + } + } + } + +no_r2_instr: + prev_state = exception_enter(); + if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) goto out; @@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } +static int wait_on_fp_mode_switch(atomic_t *p) +{ + /* + * The FP mode for this task is currently being switched. That may + * involve modifications to the format of this tasks FP context which + * make it unsafe to proceed with execution for the moment. Instead, + * schedule some other task. + */ + schedule(); + return 0; +} + static int enable_restore_fp_context(int msa) { int err, was_fpu_owner, prior_msa; + /* + * If an FP mode switch is currently underway, wait for it to + * complete before proceeding. + */ + wait_on_atomic_t(¤t->mm->context.fp_mode_switching, + wait_on_fp_mode_switch, TASK_KILLABLE); + if (!used_math()) { /* First time FP context user. 
*/ preempt_disable(); @@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 @@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void) printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", @@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void) unsigned int reg_val; /* For the moment, report the problem and hang. */ - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); @@ -1959,7 +2007,7 @@ static void configure_hwrena(void) { unsigned int hwrena = cpu_hwrena_impl_bits; - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) hwrena |= 0x0000000f; if (!noulri && cpu_has_userlocal) @@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu) * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt */ - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; @@ -2094,7 +2142,7 @@ void __init trap_init(void) #else ebase = CKSEG0; #endif - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) ebase += (read_c0_ebase() & 0x3ffff000); } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index e11906dff885..bbb69695a0a1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no lwl instruction */ +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n" \ + ".set\tnoat\n\t" \ + "1:"user_lb("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ @@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs); 
".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has not lwl and ldl instructions */ +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_lbu("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:lb\t%0, 0(%2)\n\t" \ + "2:lbu\t $1, 1(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:lbu\t$1, 2(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:lbu\t$1, 3(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "5:lbu\t$1, 4(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "6:lbu\t$1, 5(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "7:lbu\t$1, 6(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "8:lbu\t$1, 7(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n\t" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ + #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ @@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_swl("%1", "(%2)")"\n" \ @@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); -#endif +#else +/* MIPSR6 has no swl and sdl instructions */ +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_sb("%1", "3(%2)")"\n\t" \ + "srl\t$1, %1, 0x8\n\t" \ + "2:"user_sb("$1", "2(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "3:"user_sb("$1", "1(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "4:"user_sb("$1", "0(%2)")"\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:sb\t%1, 7(%2)\n\t" \ + "dsrl\t$1, %1, 0x8\n\t" \ + "2:sb\t$1, 6(%2)\n\t" \ + "dsrl\t$1, $1, 
0x8\n\t" \ + "3:sb\t$1, 5(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "4:sb\t$1, 4(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "5:sb\t$1, 3(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "6:sb\t$1, 2(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "7:sb\t$1, 1(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "8:sb\t$1, 0(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); +#endif /* CONFIG_CPU_MIPSR6 */ + +#else /* __BIG_ENDIAN */ -#ifdef __LITTLE_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"user_lb("%0", "1(%2)")"\n" \ @@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "3(%2)")"\n" \ @@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no lwl instruction */ +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n" \ + ".set\tnoat\n\t" \ + "1:"user_lb("%0", "3(%2)")"\n\t" \ + "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ + #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ @@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "3(%2)")"\n" \ @@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has not lwl and ldl instructions */ +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_lbu("%0", "3(%2)")"\n\t" \ + "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" 
(res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:lb\t%0, 7(%2)\n\t" \ + "2:lbu\t$1, 6(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:lbu\t$1, 5(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:lbu\t$1, 4(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "5:lbu\t$1, 3(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "6:lbu\t$1, 2(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "7:lbu\t$1, 1(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "8:lbu\t$1, 0(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n\t" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ @@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); - +#ifndef CONFIG_CPU_MIPSR6 #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_swl("%1", "3(%2)")"\n" \ @@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no swl and sdl instructions */ +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_sb("%1", "0(%2)")"\n\t" \ + "srl\t$1, %1, 0x8\n\t" \ + "2:"user_sb("$1", "1(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "3:"user_sb("$1", "2(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "4:"user_sb("$1", "3(%2)")"\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:sb\t%1, 0(%2)\n\t" \ + "dsrl\t$1, %1, 0x8\n\t" \ + "2:sb\t$1, 1(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "3:sb\t$1, 2(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "4:sb\t$1, 3(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "5:sb\t$1, 4(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "6:sb\t$1, 5(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "7:sb\t$1, 6(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "8:sb\t$1, 7(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); +#endif /* 
CONFIG_CPU_MIPSR6 */ #endif static void emulate_load_store_insn(struct pt_regs *regs, @@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; return; +#ifndef CONFIG_CPU_MIPSR6 /* * COP2 is available to implementor for application specific use. * It's up to applications to register a notifier chain and do * whatever they have to do, including possible sending of signals. + * + * This instruction has been reallocated in Release 6 */ case lwc2_op: cu2_notifier_call_chain(CU2_LWC2_OP, regs); @@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sdc2_op: cu2_notifier_call_chain(CU2_SDC2_OP, regs); break; - +#endif default: /* * Pheeee... We encountered an yet unknown instruction or diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index eeddc58802e1..1e9e900cd3c3 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -8,6 +8,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o +lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 5d3238af9b5c..9245e1705e69 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -293,9 +293,14 @@ and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) PREFD( 1, 2*32(dst) ) +#ifndef CONFIG_CPU_MIPSR6 bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ +#else + or t0, t0, t1 + bnez t0, .Lcopy_unaligned_bytes\@ +#endif /* * use delay slot for fall-through * src and dst are aligned; need to compute rem @@ -376,6 +381,7 @@ bne rem, len, 1b .set noreorder +#ifndef CONFIG_CPU_MIPSR6 /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch @@ -477,6 +483,7 @@ bne len, rem, 1b .set noreorder +#endif /* !CONFIG_CPU_MIPSR6 */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop @@ -504,6 +511,22 @@ .Ldone\@: jr ra nop + +#ifdef CONFIG_CPU_MIPSR6 +.Lcopy_unaligned_bytes\@: +1: + COPY_BYTE(0) + COPY_BYTE(1) + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) + COPY_BYTE(6) + COPY_BYTE(7) + ADD src, src, 8 + b 1b + ADD dst, dst, 8 +#endif /* CONFIG_CPU_MIPSR6 */ .if __memcpy == 1 END(memcpy) .set __memcpy, 0 diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index c8fe6b1968fb..b8e63fd00375 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -111,6 +111,7 @@ .set at #endif +#ifndef CONFIG_CPU_MIPSR6 R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ @@ -120,6 +121,30 @@ PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ +#else /* CONFIG_CPU_MIPSR6 */ +#define STORE_BYTE(N) \ + EX(sb, a1, N(a0), .Lbyte_fixup\@); \ + beqz t0, 0f; \ + PTR_ADDU t0, 1; + + PTR_ADDU a2, t0 /* correct size */ + PTR_ADDU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) +#endif +0: + ori a0, STORMASK + xori a0, STORMASK + PTR_ADDIU a0, STORSIZE +#endif /* CONFIG_CPU_MIPSR6 */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial\@ /* no block to fill */ @@ -159,6 +184,7 @@ andi a2, STORMASK /* At most one long to go */ beqz a2, 1f +#ifndef CONFIG_CPU_MIPSR6 PTR_ADDU a0, a2 /* What's left */ R10KCBARRIER(0(ra)) 
#ifdef __MIPSEB__ @@ -166,6 +192,22 @@ #else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif +#else + PTR_SUBU t0, $0, a2 + PTR_ADDIU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) +#endif +0: +#endif 1: jr ra move a2, zero @@ -186,6 +228,11 @@ .hidden __memset .endif +.Lbyte_fixup\@: + PTR_SUBU a2, $0, t0 + jr ra + PTR_ADDIU a2, 1 + .Lfirst_fixup\@: jr ra nop diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index be777d9a3f85..272af8ac2425 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -15,7 +15,7 @@ #include <linux/export.h> #include <linux/stringify.h> -#ifndef CONFIG_CPU_MIPSR2 +#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6) /* * For cli() we have to insert nops to make sure that the new value diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 9dfcd7fc1bc3..b30bf65c7d7d 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -48,6 +48,7 @@ #include <asm/processor.h> #include <asm/fpu_emulator.h> #include <asm/fpu.h> +#include <asm/mips-r2-to-r6-emul.h> #include "ieee754.h" @@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *, #define modeindex(v) ((v) & FPU_CSR_RM) /* convert condition code register number to csr bit */ -static const unsigned int fpucondbit[8] = { +const unsigned int fpucondbit[8] = { FPU_CSR_COND0, FPU_CSR_COND1, FPU_CSR_COND2, @@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; /* Fall through */ case jr_op: + /* For R6, JR already emulated in jalr_op */ + if (NO_R6EMU && insn.r_format.opcode == jr_op) + break; *contpc = regs->regs[insn.r_format.rs]; return 1; } @@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, switch (insn.i_format.rt) { case bltzal_op: case bltzall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bltzall_op)) + break; + regs->regs[31] = regs->cp0_epc + dec_insn.pc_inc + dec_insn.next_pc_inc; /* Fall through */ - case bltz_op: case bltzl_op: + if (NO_R6EMU) + break; + case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, return 1; case bgezal_op: case bgezall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bgezall_op)) + break; + regs->regs[31] = regs->cp0_epc + dec_insn.pc_inc + dec_insn.next_pc_inc; /* Fall through */ - case bgez_op: case bgezl_op: + if (NO_R6EMU) + break; + case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, /* Set microMIPS mode bit: XOR for jalx. 
*/ *contpc ^= bit; return 1; - case beq_op: case beql_op: + if (NO_R6EMU) + break; + case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) *contpc = regs->cp0_epc + @@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case bne_op: case bnel_op: + if (NO_R6EMU) + break; + case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) *contpc = regs->cp0_epc + @@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case blez_op: case blezl_op: + if (NO_R6EMU) + break; + case blez_op: + + /* + * Compact branches for R6 for the + * blez and blezl opcodes. + * BLEZ | rs = 0 | rt != 0 == BLEZALC + * BLEZ | rs = rt != 0 == BGEZALC + * BLEZ | rs != 0 | rt != 0 == BGEUC + * BLEZL | rs = 0 | rt != 0 == BLEZC + * BLEZL | rs = rt != 0 == BGEZC + * BLEZL | rs != 0 | rt != 0 == BGEC + * + * For real BLEZ{,L}, rt is always 0. + */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } if ((long)regs->regs[insn.i_format.rs] <= 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case bgtz_op: case bgtzl_op: + if (NO_R6EMU) + break; + case bgtz_op: + /* + * Compact branches for R6 for the + * bgtz and bgtzl opcodes. + * BGTZ | rs = 0 | rt != 0 == BGTZALC + * BGTZ | rs = rt != 0 == BLTZALC + * BGTZ | rs != 0 | rt != 0 == BLTUC + * BGTZL | rs = 0 | rt != 0 == BGTZC + * BGTZL | rs = rt != 0 == BLTZC + * BGTZL | rs != 0 | rt != 0 == BLTC + * + * *ZALC varint for BGTZ &&& rt != 0 + * For real GTZ{,L}, rt is always 0. + */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } + if ((long)regs->regs[insn.i_format.rs] > 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; + case cbcond0_op: + case cbcond1_op: + if (!cpu_has_mips_r6) + break; + if (insn.i_format.rt && !insn.i_format.rs) + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) @@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, else *contpc = regs->cp0_epc + 8; return 1; +#else + case bc6_op: + /* + * Only valid for MIPS R6 but we can still end up + * here from a broken userland so just tell emulator + * this is not a branch and let it break later on. 
+ */ + if (!cpu_has_mips_r6) + break; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + case balc6_op: + if (!cpu_has_mips_r6) + break; + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + case beqzcjic_op: + if (!cpu_has_mips_r6) + break; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + case bnezcjialc_op: + if (!cpu_has_mips_r6) + break; + if (!insn.i_format.rs) + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #endif case cop0_op: case cop1_op: + /* Need to check for R6 bc1nez and bc1eqz branches */ + if (cpu_has_mips_r6 && + ((insn.i_format.rs == bc1eqz_op) || + (insn.i_format.rs == bc1nez_op))) { + bit = 0; + switch (insn.i_format.rs) { + case bc1eqz_op: + if (get_fpr32(¤t->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) + bit = 1; + break; + case bc1nez_op: + if (!(get_fpr32(¤t->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) + bit = 1; + break; + } + if (bit) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } + /* R2/R6 compatible cop1 instruction. Fall through */ case cop2_op: case cop1x_op: if (insn.i_format.rs == bc_op) { @@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * achieve full IEEE-754 accuracy - however this emulator does. */ case frsqrt_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_sp_rsqrt; goto scopuop; case frecip_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_sp_recip; @@ -1616,13 +1763,13 @@ copcsr: * achieve full IEEE-754 accuracy - however this emulator does. 
*/ case frsqrt_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_dp_rsqrt; goto dcopuop; case frecip_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_dp_recip; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index dd261df005c2..3f8059602765 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) __asm__ __volatile__ ( ".set push\n\t" ".set noat\n\t" - ".set mips3\n\t" + ".set "MIPS_ISA_LEVEL"\n\t" #ifdef CONFIG_32BIT "la $at,1f\n\t" #endif @@ -1255,6 +1255,7 @@ static void probe_pcache(void) case CPU_P5600: case CPU_PROAPTIV: case CPU_M5150: + case CPU_QEMU_GENERIC: if (!(read_c0_config7() & MIPS_CONF7_IAR) && (c->icache.waysize > PAGE_SIZE)) c->icache.flags |= MIPS_CACHE_ALIASES; @@ -1472,7 +1473,8 @@ static void setup_scache(void) default: if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 70ab5d664332..7ff8637e530d 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> +#include <linux/ratelimit.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> @@ -28,6 +29,8 @@ #include <asm/highmem.h> /* For VMALLOC_END */ #include <linux/kdebug.h> +int show_unhandled_signals = 1; + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); + #if 0 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), current->comm, current->pid, field, address, write, @@ -203,15 +208,21 @@ bad_area_nosemaphore: if (user_mode(regs)) { tsk->thread.cp0_badvaddr = address; tsk->thread.error_code = write; -#if 0 - printk("do_page_fault() #2: sending SIGSEGV to %s for " - "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", - tsk->comm, - write ? "write access to" : "read access from", - field, address, - field, (unsigned long) regs->cp0_epc, - field, (unsigned long) regs->regs[31]); -#endif + if (show_unhandled_signals && + unhandled_signal(tsk, SIGSEGV) && + __ratelimit(&ratelimit_state)) { + pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", + tsk->comm, + write ? 
"write access to" : "read access from", + field, address); + pr_info("epc = %0*lx in", field, + (unsigned long) regs->cp0_epc); + print_vma_addr(" ", regs->cp0_epc); + pr_info("ra = %0*lx in", field, + (unsigned long) regs->regs[31]); + print_vma_addr(" ", regs->regs[31]); + pr_info("\n"); + } info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index b611102e23b5..3f85f921801b 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5]; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) +/* + * R6 has a limited offset of the pref instruction. + * Skip it if the offset is more than 9 bits. + */ +#define _uasm_i_pref(a, b, c, d) \ +do { \ + if (cpu_has_mips_r6) { \ + if (c <= 0xff && c >= -0x100) \ + uasm_i_pref(a, b, c, d);\ + } else { \ + uasm_i_pref(a, b, c, d); \ + } \ +} while(0) + static int pref_bias_clear_store; static int pref_bias_copy_load; static int pref_bias_copy_store; @@ -178,7 +192,15 @@ static void set_prefetch_parameters(void) pref_bias_copy_load = 256; pref_bias_copy_store = 128; pref_src_mode = Pref_LoadStreamed; - pref_dst_mode = Pref_PrepareForStore; + if (cpu_has_mips_r6) + /* + * Bit 30 (Pref_PrepareForStore) has been + * removed from MIPS R6. Use bit 5 + * (Pref_StoreStreamed). + */ + pref_dst_mode = Pref_StoreStreamed; + else + pref_dst_mode = Pref_PrepareForStore; break; } } else { @@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off) return; if (pref_bias_clear_store) { - uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, + _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, A0); } else if (cache_line_size == (half_clear_loop_size << 1)) { if (cpu_has_cache_cdex_s) { @@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off) return; if (pref_bias_copy_load) - uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); } static inline void build_copy_store_pref(u32 **buf, int off) @@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) return; if (pref_bias_copy_store) { - uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, + _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, A0); } else if (cache_line_size == (half_copy_loop_size << 1)) { if (cpu_has_cache_cdex_s) { diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 99eb8fabab60..4ceafd13870c 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c) case CPU_PROAPTIV: case CPU_P5600: case CPU_BMIPS5000: + case CPU_QEMU_GENERIC: if (config2 & (1 << 12)) return 0; } @@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void) /* Ignore anything but MIPSxx processors */ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? 
*/ diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 30639a6e9b8c..b2afa49beab0 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -485,13 +485,11 @@ static void r4k_tlb_configure(void) * Enable the no read, no exec bits, and enable large virtual * address. */ - u32 pg = PG_RIE | PG_XIE; #ifdef CONFIG_64BIT - pg |= PG_ELPA; + set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA); +#else + set_c0_pagegrain(PG_RIE | PG_XIE); #endif - if (cpu_has_rixiex) - pg |= PG_IEC; - write_c0_pagegrain(pg); } temp_tlb_entry = current_cpu_data.tlbsize - 1; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 3978a3d81366..d75ff73a2012 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, case tlb_indexed: tlbw = uasm_i_tlbwi; break; } - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { /* * The architecture spec says an ehb is required here, * but a number of cores do not have the hazard and @@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_PROAPTIV: case CPU_P5600: case CPU_M5150: + case CPU_QEMU_GENERIC: break; default: @@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void) switch (current_cpu_type()) { default: - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); case CPU_CAVIUM_OCTEON: @@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void) switch (current_cpu_type()) { default: - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); case CPU_CAVIUM_OCTEON: diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 8399ddf03a02..d78178daea4b 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -38,14 +38,6 @@ | (e) << RE_SH \ | (f) << FUNC_SH) -/* Define these when we are not the ISA the kernel is being compiled with. */ -#ifndef CONFIG_CPU_MICROMIPS -#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) -#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) -#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) -#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) -#endif - #include "uasm.c" static struct insn insn_table_MM[] = { diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 8e02291cfc0c..b4a837893562 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -38,13 +38,13 @@ | (e) << RE_SH \ | (f) << FUNC_SH) -/* Define these when we are not the ISA the kernel is being compiled with. */ -#ifdef CONFIG_CPU_MICROMIPS -#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) -#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) -#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) -#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) -#endif +/* This macro sets the non-variable bits of an R6 instruction. 
*/ +#define M6(a, b, c, d, e) \ + ((a) << OP_SH \ + | (b) << RS_SH \ + | (c) << RT_SH \ + | (d) << SIMM9_SH \ + | (e) << FUNC_SH) #include "uasm.c" @@ -62,7 +62,11 @@ static struct insn insn_table[] = { { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, +#endif { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, @@ -85,13 +89,22 @@ static struct insn insn_table[] = { { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, +#else + { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS }, +#endif { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, + { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, +#endif { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, @@ -104,11 +117,20 @@ static struct insn insn_table[] = { { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, +#ifndef CONFIG_CPU_MIPSR6 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, +#endif { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, +#ifndef CONFIG_CPU_MIPSR6 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, + { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, +#endif { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, @@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...) 
op |= build_set(va_arg(ap, u32)); if (ip->fields & SCIMM) op |= build_scimm(va_arg(ap, u32)); + if (ip->fields & SIMM9) + op |= build_scimm9(va_arg(ap, u32)); va_end(ap); **buf = op; diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 4adf30284813..319051c34343 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -24,7 +24,8 @@ enum fields { JIMM = 0x080, FUNC = 0x100, SET = 0x200, - SCIMM = 0x400 + SCIMM = 0x400, + SIMM9 = 0x800, }; #define OP_MASK 0x3f @@ -41,6 +42,8 @@ enum fields { #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 +#define SIMM9_SH 7 +#define SIMM9_MASK 0x1ff enum opcode { insn_invalid, @@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg) return (arg & SCIMM_MASK) << SCIMM_SH; } +static inline u32 build_scimm9(s32 arg) +{ + WARN((arg > 0xff || arg < -0x100), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg & SIMM9_MASK) << SIMM9_SH; +} + static inline u32 build_func(u32 arg) { WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); @@ -330,7 +341,7 @@ I_u3u1u2(_ldx) void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, unsigned int c) { - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) /* * As per erratum Core-14449, replace prefetches 0-4, * 6-24 with 'pref 28'. diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index ec1dd2491f96..e1d69895fb1d 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts) int get_c0_perfcount_int(void) { if (gic_present) - return gic_get_c0_compare_int(); + return gic_get_c0_perfcount_int(); if (cp0_perfcount_irq >= 0) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c index f2355e3e65a1..f97e169393bc 100644 --- a/arch/mips/pci/pci-bcm1480.c +++ b/arch/mips/pci/pci-bcm1480.c @@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn, } struct pci_ops bcm1480_pci_ops = { - .read = bcm1480_pcibios_read, - .write = bcm1480_pcibios_write, + .read = bcm1480_pcibios_read, + .write = bcm1480_pcibios_write, }; static struct resource bcm1480_mem_resource = { diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index bedb72bd3a27..a04af55d89f1 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, static struct pci_ops octeon_pci_ops = { - .read = octeon_read_config, - .write = octeon_write_config, + .read = octeon_read_config, + .write = octeon_write_config, }; static struct resource octeon_pci_mem_resource = { diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index eb4a17ba4a53..1bb0b2bf8d6e 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, } static struct pci_ops octeon_pcie0_ops = { - .read = octeon_pcie0_read_config, - .write = octeon_pcie0_write_config, + .read = octeon_pcie0_read_config, + .write = octeon_pcie0_write_config, }; static struct resource octeon_pcie0_mem_resource = { @@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = { }; static struct pci_ops octeon_pcie1_ops = { - .read = octeon_pcie1_read_config, - .write = octeon_pcie1_write_config, + .read = 
octeon_pcie1_read_config, + .write = octeon_pcie1_write_config, }; static struct resource octeon_pcie1_mem_resource = { @@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = { }; static struct pci_ops octeon_dummy_ops = { - .read = octeon_dummy_read_config, - .write = octeon_dummy_write_config, + .read = octeon_dummy_read_config, + .write = octeon_dummy_write_config, }; static struct resource octeon_dummy_mem_resource = { diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig index 6073ca456d11..4190093d3053 100644 --- a/arch/mips/pmcs-msp71xx/Kconfig +++ b/arch/mips/pmcs-msp71xx/Kconfig @@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA endchoice config MSP_HAS_USB - boolean + bool depends on PMC_MSP config MSP_ETH - boolean + bool select MSP_HAS_MAC depends on PMC_MSP config MSP_HAS_MAC - boolean + bool depends on PMC_MSP diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index 8f1b86d4da84..cdf187600010 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c @@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev) return 0; } -static int gio_device_suspend(struct device *dev, pm_message_t state) -{ - struct gio_device *gio_dev = to_gio_device(dev); - struct gio_driver *drv = to_gio_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->suspend) - error = drv->suspend(gio_dev, state); - return error; -} - -static int gio_device_resume(struct device *dev) -{ - struct gio_device *gio_dev = to_gio_device(dev); - struct gio_driver *drv = to_gio_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->resume) - error = drv->resume(gio_dev); - return error; -} - static void gio_device_shutdown(struct device *dev) { struct gio_device *gio_dev = to_gio_device(dev); @@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = { .match = gio_bus_match, .probe = gio_device_probe, .remove = gio_device_remove, - .suspend = gio_device_suspend, - .resume = gio_device_resume, .shutdown = gio_device_shutdown, .uevent = gio_device_uevent, }; diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c index ac37e54b3d5e..e44a15d4f573 100644 --- a/arch/mips/sgi-ip27/ip27-reset.c +++ b/arch/mips/sgi-ip27/ip27-reset.c @@ -8,6 +8,7 @@ * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ +#include <linux/compiler.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> @@ -25,9 +26,9 @@ #include <asm/sn/gda.h> #include <asm/sn/sn0/hub.h> -void machine_restart(char *command) __attribute__((noreturn)); -void machine_halt(void) __attribute__((noreturn)); -void machine_power_off(void) __attribute__((noreturn)); +void machine_restart(char *command) __noreturn; +void machine_halt(void) __noreturn; +void machine_power_off(void) __noreturn; #define noreturn while(1); /* Silence gcc. 
*/ diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 1f823da4c77b..44b3470a0bbb 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -8,6 +8,7 @@ * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> */ +#include <linux/compiler.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -35,9 +36,9 @@ static struct timer_list power_timer, blink_timer, debounce_timer; static int has_panicked, shuting_down; -static void ip32_machine_restart(char *command) __attribute__((noreturn)); -static void ip32_machine_halt(void) __attribute__((noreturn)); -static void ip32_machine_power_off(void) __attribute__((noreturn)); +static void ip32_machine_restart(char *command) __noreturn; +static void ip32_machine_halt(void) __noreturn; +static void ip32_machine_power_off(void) __noreturn; static void ip32_machine_restart(char *cmd) { diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index afab728ab65e..96d3f9deb59c 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h @@ -56,7 +56,9 @@ extern void paging_init(void); #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 #define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PTE 1024 #define PGD_SIZE PAGE_SIZE diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c deleted file mode 100644 index bd65dae17f32..000000000000 --- a/arch/mn10300/unit-asb2305/pci-iomap.c +++ /dev/null @@ -1,35 +0,0 @@ -/* ASB2305 PCI I/O mapping handler - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ -#include <linux/pci.h> -#include <linux/module.h> - -/* - * Create a virtual mapping cookie for a PCI BAR (memory or IO) - */ -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len || !start) - return NULL; - - if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) { - if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO)) - return ioremap(start, len); - else - return ioremap_nocache(start, len); - } - - return NULL; -} -EXPORT_SYMBOL(pci_iomap); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index ab2e7a198a4c..a6bd07ca3d6c 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -192,7 +192,7 @@ struct __large_struct { ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -202,7 +202,7 @@ struct __large_struct { const __typeof__(*(ptr)) * __gu_addr = (ptr); \ if (access_ok(VERIFY_READ, __gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 91fbb6ee702c..965a0999fc4c 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -148,7 +148,7 @@ endef # we require gcc 3.3 or above to compile the kernel archprepare: checkbin checkbin: - @if test "$(call cc-version)" -lt "0303"; then \ + @if test "$(cc-version)" -lt "0303"; then \ echo -n "Sorry, GCC v3.3 or above is required to build " ; \ echo "the kernel." ; \ false ; \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 8c966b2270aa..15207b9362bf 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #if PT_NLEVELS == 3 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) #else +#define __PAGETABLE_PMD_FOLDED #define BITS_PER_PMD 0 #endif #define PTRS_PER_PMD (1UL << BITS_PER_PMD) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 132d9c681d6a..fc502e042438 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -314,7 +314,7 @@ TOUT := .tmp_gas_check # - Require gcc 4.0 or above on 64-bit # - gcc-4.2.0 has issues compiling modules on 64-bit checkbin: - @if test "$(call cc-version)" = "0304" ; then \ + @if test "$(cc-version)" = "0304" ; then \ if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ echo 'correctly with gcc-3.4 and your version of binutils.'; \ @@ -322,13 +322,13 @@ checkbin: false; \ fi ; \ fi - @if test "$(call cc-version)" -lt "0400" \ + @if test "$(cc-version)" -lt "0400" \ && test "x${CONFIG_PPC64}" = "xy" ; then \ echo -n "Sorry, GCC v4.0 or above is required to build " ; \ echo "the 64-bit powerpc kernel." ; \ false ; \ fi - @if test "$(call cc-fullversion)" = "040200" \ + @if test "$(cc-fullversion)" = "040200" \ && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ echo 'kernel with modules enabled.' 
; \ diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 51866f170684..ca7957b09a3c 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -142,6 +142,7 @@ CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y CONFIG_STAGING=y CONFIG_FSL_CORENET_CF=y +CONFIG_CLK_QORIQ=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index d6c0c8198952..04737aaa8b6b 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -122,6 +122,7 @@ CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y +CONFIG_CLK_QORIQ=y CONFIG_FSL_CORENET_CF=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 7316dd15278a..2d7b33fab953 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -54,6 +54,7 @@ #include <linux/irq.h> #include <linux/delay.h> #include <linux/irq_work.h> +#include <linux/clk-provider.h> #include <asm/trace.h> #include <asm/io.h> @@ -975,6 +976,10 @@ void __init time_init(void) init_decrementer_clockevent(); tick_setup_hrtimer_broadcast(); + +#ifdef CONFIG_COMMON_CLK + of_clk_init(NULL); +#endif } diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index 6eb614a271fb..f691bcabd710 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void) } } +/* + * The "fixed-clock" nodes (which includes the oscillator node if the board's + * DT provides one) has already been scanned by the of_clk_init() in + * time_init(). 
+ */ int __init mpc5121_clk_init(void) { struct device_node *clk_np; @@ -1187,12 +1192,6 @@ int __init mpc5121_clk_init(void) mpc512x_clk_preset_data(); /* - * have the device tree scanned for "fixed-clock" nodes (which - * includes the oscillator node if the board's DT provides one) - */ - of_clk_init(NULL); - - /* * add a dummy clock for those situations where a clock spec is * required yet no real clock is involved */ diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 4c8008dd938e..99824ff8dd35 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry) parent = dentry->d_parent; mutex_lock(&parent->d_inode->i_mutex); if (hypfs_positive(dentry)) { - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); @@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp) return nonseekable_open(inode, filp); } -static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t offset) +static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { - char *data; - ssize_t ret; - struct file *filp = iocb->ki_filp; - /* XXX: temporary */ - char __user *buf = iov[0].iov_base; - size_t count = iov[0].iov_len; - - if (nr_segs != 1) - return -EINVAL; - - data = filp->private_data; - ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data)); - if (ret <= 0) - return ret; + struct file *file = iocb->ki_filp; + char *data = file->private_data; + size_t available = strlen(data); + loff_t pos = iocb->ki_pos; + size_t count; - iocb->ki_pos += ret; - file_accessed(filp); - - return ret; + if (pos < 0) + return -EINVAL; + if (pos >= available || !iov_iter_count(to)) + return 0; + count = copy_to_iter(data + pos, available - pos, to); + if (!count) + return -EFAULT; + iocb->ki_pos = pos + count; + file_accessed(file); + return count; } -static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t offset) + +static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from) { int rc; struct super_block *sb = file_inode(iocb->ki_filp)->i_sb; struct hypfs_sb_info *fs_info = sb->s_fs_info; - size_t count = iov_length(iov, nr_segs); + size_t count = iov_iter_count(from); /* * Currently we only allow one update per second for two reasons: @@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, } hypfs_update_update(sb); rc = count; + iov_iter_advance(from, count); out: mutex_unlock(&fs_info->lock); return rc; @@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir, static const struct file_operations hypfs_file_ops = { .open = hypfs_open, .release = hypfs_release, - .read = do_sync_read, - .write = do_sync_write, - .aio_read = hypfs_aio_read, - .aio_write = hypfs_aio_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = hypfs_read_iter, + .write_iter = hypfs_write_iter, .llseek = no_llseek, }; diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index f664e96f48c7..1a9a98de5bde 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -16,6 +16,7 @@ struct zpci_iomap_entry { u32 fh; u8 bar; + u16 count; }; extern struct zpci_iomap_entry *zpci_iomap_start; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index fbb5ee3ae57c..e08ec38f8c6e 100644 
--- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -91,7 +91,9 @@ extern unsigned long zero_page_mask; */ #define PTRS_PER_PTE 256 #ifndef CONFIG_64BIT +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PUD 1 #else /* CONFIG_64BIT */ #define PTRS_PER_PMD 2048 diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c4fbb9527c5c..b1453a2ae1ca 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -18,15 +18,15 @@ struct cpu_topology_s390 { cpumask_t book_mask; }; -extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; +DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology); -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) -#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) -#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) -#define topology_book_id(cpu) (cpu_topology[cpu].book_id) -#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask) +#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) +#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) +#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask) +#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) +#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) +#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) +#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask) #define mc_capable() 1 @@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { } #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) -#ifdef CONFIG_SCHED_BOOK -void s390_init_cpu_topology(void); -#else -static inline void s390_init_cpu_topology(void) -{ -}; -#endif - #include <asm-generic/topology.h> #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index 632fa06ea162..0969d113b3d6 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level) { if (level >= CACHE_MAX_LEVEL) return CACHE_TYPE_NOCACHE; - ci += level; - if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE) return CACHE_TYPE_NOCACHE; - return cache_type_map[ci->type]; } @@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti) } static void ci_leaf_init(struct cacheinfo *this_leaf, int private, - enum cache_type type, unsigned int level) + enum cache_type type, unsigned int level, int cpu) { int ti, num_sets; - int cpu = smp_processor_id(); if (type == CACHE_TYPE_INST) ti = CACHE_TI_INSTRUCTION; else ti = CACHE_TI_UNIFIED; - this_leaf->level = level + 1; this_leaf->type = type; this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti); - this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, - level, ti); + this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); this_leaf->size = ecag(EXTRACT_SIZE, level, ti); - num_sets = this_leaf->size / this_leaf->coherency_line_size; num_sets /= this_leaf->ways_of_associativity; this_leaf->number_of_sets = num_sets; @@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu) if (!this_cpu_ci) return -EINVAL; - ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); do { ctype = 
get_cache_type(&ct.ci[0], level); @@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu) /* Separate instruction and data caches */ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; } while (++level < CACHE_MAX_LEVEL); - this_cpu_ci->num_levels = level; this_cpu_ci->num_leaves = leaves; - return 0; } int populate_cache_leaves(unsigned int cpu) { + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; unsigned int level, idx, pvt; union cache_topology ct; enum cache_type ctype; - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf = this_cpu_ci->info_list; ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); for (idx = 0, level = 0; level < this_cpu_ci->num_levels && idx < this_cpu_ci->num_leaves; idx++, level++) { if (!this_leaf) return -EINVAL; - pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; ctype = get_cache_type(&ct.ci[0], level); if (ctype == CACHE_TYPE_SEPARATE) { - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level); - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu); } else { - ci_leaf_init(this_leaf++, pvt, ctype, level); + ci_leaf_init(this_leaf++, pvt, ctype, level, cpu); } } return 0; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 70a329450901..4427ab7ac23a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) S390_lowcore.machine_flags |= MACHINE_FLAG_VX; - if (test_facility(128)) - S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; #endif } -static int __init nocad_setup(char *str) +static int __init cad_setup(char *str) { - S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD; + int val; + + get_option(&str, &val); + if (val && test_facility(128)) + S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; return 0; } -early_param("nocad", nocad_setup); +early_param("cad", cad_setup); static int __init cad_init(void) { diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index bfac77ada4f2..a5ea8bc17cb3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p) setup_lowcore(); smp_fill_possible_mask(); cpu_init(); - s390_init_cpu_topology(); /* * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 
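
The s390 topology hunks above (asm/topology.h) and below (kernel/smp.c, kernel/topology.c) replace the global cpu_topology[NR_CPUS] array with per-CPU data, so every consumer moves from direct array indexing to the per_cpu() accessor. The following is only a minimal user-space sketch of that call-site change, not the kernel's DEFINE_PER_CPU machinery: struct cpu_topo, cpu_topology_store and cpu_topo() are made-up names for illustration.

#include <stdio.h>

#define NR_CPUS 4

struct cpu_topo {
	int socket_id;
	int core_id;
};

/* Backing storage; the real patch uses DEFINE_PER_CPU() instead of an array. */
static struct cpu_topo cpu_topology_store[NR_CPUS];

/* Accessor standing in for per_cpu(cpu_topology, cpu). */
#define cpu_topo(cpu) (cpu_topology_store[(cpu)])

/* Call sites change from cpu_topology[cpu].core_id to the accessor form. */
static int topology_core_id(int cpu)
{
	return cpu_topo(cpu).core_id;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_topo(cpu).socket_id = 0;
		cpu_topo(cpu).core_id = cpu;
		printf("cpu %d: socket %d core %d\n",
		       cpu, cpu_topo(cpu).socket_id, topology_core_id(cpu));
	}
	return 0;
}

One motivation for such a conversion is that per-CPU storage keeps each CPU's topology data local to that CPU and hides the layout behind a single accessor, so the topology_*() macros and the scheduler masks below no longer depend on an NR_CPUS-sized global array.
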
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a668993ff577..db8f1115a3bf 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,14 +59,13 @@ enum { CPU_STATE_CONFIGURED, }; +static DEFINE_PER_CPU(struct cpu *, cpu_device); + struct pcpu { - struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ - unsigned long async_stack; /* async stack for the cpu */ - unsigned long panic_stack; /* panic stack for the cpu */ unsigned long ec_mask; /* bit mask for ec_xxx functions */ - int state; /* physical cpu state */ - int polarization; /* physical polarization */ + signed char state; /* physical cpu state */ + signed char polarization; /* physical polarization */ u16 address; /* physical cpu address */ }; @@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) pcpu_sigp_retry(pcpu, order, 0); } +#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) +#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) + static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) { + unsigned long async_stack, panic_stack; struct _lowcore *lc; if (pcpu != &pcpu_devices[0]) { pcpu->lowcore = (struct _lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); - pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); - pcpu->panic_stack = __get_free_page(GFP_KERNEL); - if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) + async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + panic_stack = __get_free_page(GFP_KERNEL); + if (!pcpu->lowcore || !panic_stack || !async_stack) goto out; + } else { + async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET; + panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET; } lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); - lc->async_stack = pcpu->async_stack + ASYNC_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->async_stack = async_stack + ASYNC_FRAME_OFFSET; + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); #ifndef CONFIG_64BIT @@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) return 0; out: if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); + free_page(panic_stack); + free_pages(async_stack, ASYNC_ORDER); free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } return -ENOMEM; @@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) #else vdso_free_per_cpu(pcpu->lowcore); #endif - if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); - free_pages((unsigned long) pcpu->lowcore, LC_ORDER); - } + if (pcpu == &pcpu_devices[0]) + return; + free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); + free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER); + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data) void smp_call_ipl_cpu(void (*func)(void *), void *data) { pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->panic_stack + PAGE_SIZE); + pcpu_devices->lowcore->panic_stack - + PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void) 
pcpu->state = CPU_STATE_CONFIGURED; pcpu->address = stap(); pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); @@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s = &per_cpu(cpu_device, cpu)->dev; int err = 0; switch (action & ~CPU_TASKS_FROZEN) { @@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu) c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; - pcpu_devices[cpu].cpu = c; + per_cpu(cpu_device, cpu) = c; s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 24ee33f1af24..14da43b801d9 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -7,14 +7,14 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/workqueue.h> -#include <linux/bootmem.h> #include <linux/cpuset.h> #include <linux/device.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/init.h> #include <linux/delay.h> +#include <linux/init.h> +#include <linux/slab.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/mm.h> @@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock); static struct mask_info socket_info; static struct mask_info book_info; -struct cpu_topology_s390 cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); +DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology); +EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology); static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { @@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, if (lcpu < 0) continue; for (i = 0; i <= smp_cpu_mtid; i++) { - cpu_topology[lcpu + i].book_id = book->id; - cpu_topology[lcpu + i].core_id = rcore; - cpu_topology[lcpu + i].thread_id = lcpu + i; + per_cpu(cpu_topology, lcpu + i).book_id = book->id; + per_cpu(cpu_topology, lcpu + i).core_id = rcore; + per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; cpumask_set_cpu(lcpu + i, &book->mask); cpumask_set_cpu(lcpu + i, &socket->mask); if (one_socket_per_cpu) - cpu_topology[lcpu + i].socket_id = rcore; + per_cpu(cpu_topology, lcpu + i).socket_id = rcore; else - cpu_topology[lcpu + i].socket_id = socket->id; + per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; smp_cpu_set_polarization(lcpu + i, tl_core->pp); } if (one_socket_per_cpu) @@ -249,14 +249,14 @@ static void update_cpu_masks(void) spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { - cpu_topology[cpu].thread_mask = cpu_thread_map(cpu); - cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); - cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); + per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); + per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); + per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); if (!MACHINE_HAS_TOPOLOGY) { - cpu_topology[cpu].thread_id = cpu; - cpu_topology[cpu].core_id = cpu; - cpu_topology[cpu].socket_id = cpu; - 
cpu_topology[cpu].book_id = cpu; + per_cpu(cpu_topology, cpu).thread_id = cpu; + per_cpu(cpu_topology, cpu).core_id = cpu; + per_cpu(cpu_topology, cpu).socket_id = cpu; + per_cpu(cpu_topology, cpu).book_id = cpu; } } spin_unlock_irqrestore(&topology_lock, flags); @@ -334,50 +334,6 @@ void topology_expect_change(void) set_topology_timer(); } -static int __init early_parse_topology(char *p) -{ - if (strncmp(p, "off", 3)) - return 0; - topology_enabled = 0; - return 0; -} -early_param("topology", early_parse_topology); - -static void __init alloc_masks(struct sysinfo_15_1_x *info, - struct mask_info *mask, int offset) -{ - int i, nr_masks; - - nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; - for (i = 0; i < info->mnest - offset; i++) - nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; - nr_masks = max(nr_masks, 1); - for (i = 0; i < nr_masks; i++) { - mask->next = alloc_bootmem_align( - roundup_pow_of_two(sizeof(struct mask_info)), - roundup_pow_of_two(sizeof(struct mask_info))); - mask = mask->next; - } -} - -void __init s390_init_cpu_topology(void) -{ - struct sysinfo_15_1_x *info; - int i; - - if (!MACHINE_HAS_TOPOLOGY) - return; - tl_info = alloc_bootmem_pages(PAGE_SIZE); - info = tl_info; - store_topology(info); - pr_info("The CPU configuration topology of the machine is:"); - for (i = 0; i < TOPOLOGY_NR_MAG; i++) - printk(KERN_CONT " %d", info->mag[i]); - printk(KERN_CONT " / %d\n", info->mnest); - alloc_masks(info, &socket_info, 1); - alloc_masks(info, &book_info, 2); -} - static int cpu_management; static ssize_t dispatching_show(struct device *dev, @@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu) const struct cpumask *cpu_thread_mask(int cpu) { - return &cpu_topology[cpu].thread_mask; + return &per_cpu(cpu_topology, cpu).thread_mask; } const struct cpumask *cpu_coregroup_mask(int cpu) { - return &cpu_topology[cpu].core_mask; + return &per_cpu(cpu_topology, cpu).core_mask; } static const struct cpumask *cpu_book_mask(int cpu) { - return &cpu_topology[cpu].book_mask; + return &per_cpu(cpu_topology, cpu).book_mask; } +static int __init early_parse_topology(char *p) +{ + if (strncmp(p, "off", 3)) + return 0; + topology_enabled = 0; + return 0; +} +early_param("topology", early_parse_topology); + static struct sched_domain_topology_level s390_topology[] = { { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, @@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = { { NULL, }, }; +static void __init alloc_masks(struct sysinfo_15_1_x *info, + struct mask_info *mask, int offset) +{ + int i, nr_masks; + + nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; + for (i = 0; i < info->mnest - offset; i++) + nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; + nr_masks = max(nr_masks, 1); + for (i = 0; i < nr_masks; i++) { + mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL); + mask = mask->next; + } +} + +static int __init s390_topology_init(void) +{ + struct sysinfo_15_1_x *info; + int i; + + if (!MACHINE_HAS_TOPOLOGY) + return 0; + tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); + info = tl_info; + store_topology(info); + pr_info("The CPU configuration topology of the machine is:"); + for (i = 0; i < TOPOLOGY_NR_MAG; i++) + printk(KERN_CONT " %d", info->mag[i]); + printk(KERN_CONT " / %d\n", info->mnest); + alloc_masks(info, &socket_info, 1); + alloc_masks(info, &book_info, 2); + set_sched_topology(s390_topology); + return 0; +} +early_initcall(s390_topology_init); + 
static int __init topology_init(void) { if (MACHINE_HAS_TOPOLOGY) @@ -498,10 +499,3 @@ static int __init topology_init(void) return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); - -static int __init early_topology_init(void) -{ - set_sched_topology(s390_topology); - return 0; -} -early_initcall(early_topology_init); diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 7699e735ae28..61541fb93dc6 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -25,9 +25,7 @@ __kernel_clock_gettime: je 4f cghi %r2,__CLOCK_REALTIME je 5f - cghi %r2,__CLOCK_THREAD_CPUTIME_ID - je 9f - cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ + cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC_COARSE je 3f @@ -106,7 +104,7 @@ __kernel_clock_gettime: aghi %r15,16 br %r14 - /* CLOCK_THREAD_CPUTIME_ID for this thread */ + /* CPUCLOCK_VIRT for this thread */ 9: icm %r0,15,__VDSO_ECTG_OK(%r5) jz 12f ear %r2,%a4 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index d008f638b2cd..179a2c20b01f 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void) { unsigned long base; - base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT); + base = STACK_TOP / 3 * 2; + if (!is_32bit_task()) + /* Align to 4GB */ + base &= ~((1UL << 32) - 1); return base + mmap_rnd(); } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 3290f11ae1d9..753a56731951 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) } /* Create a virtual mapping cookie for a PCI BAR */ -void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) +void __iomem *pci_iomap_range(struct pci_dev *pdev, + int bar, + unsigned long offset, + unsigned long max) { struct zpci_dev *zdev = get_zdev(pdev); u64 addr; @@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) idx = zdev->bars[bar].map_idx; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = zdev->fh; - zpci_iomap_start[idx].bar = bar; + if (zpci_iomap_start[idx].count++) { + BUG_ON(zpci_iomap_start[idx].fh != zdev->fh || + zpci_iomap_start[idx].bar != bar); + } else { + zpci_iomap_start[idx].fh = zdev->fh; + zpci_iomap_start[idx].bar = bar; + } + /* Detect overrun */ + BUG_ON(!zpci_iomap_start[idx].count); spin_unlock(&zpci_iomap_lock); addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); - return (void __iomem *) addr; + return (void __iomem *) addr + offset; } -EXPORT_SYMBOL_GPL(pci_iomap); +EXPORT_SYMBOL_GPL(pci_iomap_range); + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL(pci_iomap); void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) { @@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = 0; - zpci_iomap_start[idx].bar = 0; + /* Detect underrun */ + BUG_ON(!zpci_iomap_start[idx].count); + if (!--zpci_iomap_start[idx].count) { + zpci_iomap_start[idx].fh = 0; + zpci_iomap_start[idx].bar = 0; + } spin_unlock(&zpci_iomap_lock); } EXPORT_SYMBOL_GPL(pci_iounmap); diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 
5e2725f4ac49..ff795d3a6909 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -23,7 +23,7 @@ typedef struct { #define USER_DS KERNEL_DS #endif -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define get_ds() (KERNEL_DS) diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 9486376605f4..a49635c51266 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; }; const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; }; const __typeof__(*(ptr)) *__gu_addr = (ptr); \ if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 2e07e0f40c6a..c01376c76b86 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -59,19 +59,19 @@ do { \ switch (size) { \ case 1: \ retval = __put_user_asm_b((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 2: \ retval = __put_user_asm_w((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 4: \ retval = __put_user_asm_l((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 8: \ retval = __put_user_asm_q((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ default: \ __put_user_unknown(); \ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 9634d086fc56..64ee103dc29d 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -37,7 +37,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test * can be fairly lightweight. @@ -46,8 +46,8 @@ */ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) -#define access_ok(type, addr, size) \ +#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) +#define access_ok(type, addr, size) \ ({ (void)(type); __access_ok((unsigned long)(addr), size); }) /* @@ -91,158 +91,221 @@ void __ret_efault(void); * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. 
*/ -#define put_user(x,ptr) ({ \ -unsigned long __pu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) - -#define get_user(x,ptr) ({ \ -unsigned long __gu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) +#define put_user(x, ptr) ({ \ + unsigned long __pu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \ +}) + +#define get_user(x, ptr) ({ \ + unsigned long __gu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \ +}) /* * The "__xxx" versions do not do address space checking, useful when * doing multiple accesses to the same area (the user has to do the * checks by hand with "access_ok()") */ -#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr))) struct __large_struct { unsigned long buf[100]; }; #define __m(x) ((struct __large_struct __user *)(x)) -#define __put_user_check(x,addr,size) ({ \ -register int __pu_ret; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(x,,addr,__pu_ret); break; \ -case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} } else { __pu_ret = -EFAULT; } __pu_ret; }) - -#define __put_user_nocheck(x,addr,size) ({ \ -register int __pu_ret; \ -switch (size) { \ -case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(x,,addr,__pu_ret); break; \ -case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} __pu_ret; }) - -#define __put_user_asm(x,size,addr,ret) \ +#define __put_user_check(x, addr, size) ({ \ + register int __pu_ret; \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __put_user_asm(x, b, addr, __pu_ret); \ + break; \ + case 2: \ + __put_user_asm(x, h, addr, __pu_ret); \ + break; \ + case 4: \ + __put_user_asm(x, , addr, __pu_ret); \ + break; \ + case 8: \ + __put_user_asm(x, d, addr, __pu_ret); \ + break; \ + default: \ + __pu_ret = __put_user_bad(); \ + break; \ + } \ + } else { \ + __pu_ret = -EFAULT; \ + } \ + __pu_ret; \ +}) + +#define __put_user_nocheck(x, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: __put_user_asm(x, b, addr, __pu_ret); break; \ + case 2: __put_user_asm(x, h, addr, __pu_ret); break; \ + case 4: __put_user_asm(x, , addr, __pu_ret); break; \ + case 8: __put_user_asm(x, d, addr, __pu_ret); break; \ + default: __pu_ret = __put_user_bad(); break; \ + } \ + __pu_ret; \ +}) + +#define __put_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Put user asm, inline. 
*/\n" \ -"1:\t" "st"#size " %1, %2\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "b 2b\n\t" \ - " mov %3, %0\n\t" \ - ".previous\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\t" \ - ".previous\n\n\t" \ - : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ - "i" (-EFAULT)) + "/* Put user asm, inline. */\n" \ + "1:\t" "st"#size " %1, %2\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "b 2b\n\t" \ + " mov %3, %0\n\t" \ + ".previous\n\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\t" \ + ".previous\n\n\t" \ + : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ + "i" (-EFAULT)) int __put_user_bad(void); -#define __get_user_check(x,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; }) - -#define __get_user_check_ret(x,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} x = (type) __gu_val; } else return retval; }) - -#define __get_user_nocheck(x,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} x = (type) __gu_val; __gu_ret; }) - -#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} x = (type) __gu_val; }) - -#define __get_user_asm(x,size,addr,ret) \ +#define __get_user_check(x, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __get_user_asm(__gu_val, ub, addr, __gu_ret); \ + break; \ + case 2: \ + __get_user_asm(__gu_val, uh, addr, __gu_ret); \ + break; \ + case 4: \ + __get_user_asm(__gu_val, , addr, __gu_ret); \ + break; \ + case 8: \ + __get_user_asm(__gu_val, d, addr, __gu_ret); \ + break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + } else { \ + __gu_val = 0; \ + __gu_ret = -EFAULT; \ + } \ + x = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_check_ret(x, addr, size, type, retval) ({ \ + 
register unsigned long __gu_val __asm__ ("l1"); \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __get_user_asm_ret(__gu_val, ub, addr, retval); \ + break; \ + case 2: \ + __get_user_asm_ret(__gu_val, uh, addr, retval); \ + break; \ + case 4: \ + __get_user_asm_ret(__gu_val, , addr, retval); \ + break; \ + case 8: \ + __get_user_asm_ret(__gu_val, d, addr, retval); \ + break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + x = (__force type) __gu_val; \ + } else \ + return retval; \ +}) + +#define __get_user_nocheck(x, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ + case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \ + case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + x = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ + case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \ + case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + x = (__force type) __gu_val; \ +}) + +#define __get_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Get user asm, inline. */\n" \ -"1:\t" "ld"#size " %2, %1\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "clr %1\n\t" \ - "b 2b\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ - "i" (-EFAULT)) - -#define __get_user_asm_ret(x,size,addr,retval) \ + "/* Get user asm, inline. */\n" \ + "1:\t" "ld"#size " %2, %1\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "clr %1\n\t" \ + "b 2b\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ + "i" (-EFAULT)) + +#define __get_user_asm_ret(x, size, addr, retval) \ if (__builtin_constant_p(retval) && retval == -EFAULT) \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size " %1, %0\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b,__ret_efault\n\n\t" \ - ".previous\n\t" \ - : "=&r" (x) : "m" (*__m(addr))); \ + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size " %1, %0\n\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b,__ret_efault\n\n\t" \ + ".previous\n\t" \ + : "=&r" (x) : "m" (*__m(addr))); \ else \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. 
*/\n" \ -"1:\t" "ld"#size " %1, %0\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %2, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size " %1, %0\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "ret\n\t" \ + " restore %%g0, %2, %%o0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) int __get_user_bad(void); diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index c990a5e577f0..a35194b7dba0 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -41,11 +41,11 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) #define get_ds() (KERNEL_DS) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define set_fs(val) \ do { \ - current_thread_info()->current_ds =(val).seg; \ + current_thread_info()->current_ds = (val).seg; \ __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ } while(0) @@ -88,121 +88,135 @@ void __retl_efault(void); * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. */ -#define put_user(x,ptr) ({ \ -unsigned long __pu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) +#define put_user(x, ptr) ({ \ + unsigned long __pu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\ +}) -#define get_user(x,ptr) ({ \ -unsigned long __gu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) +#define get_user(x, ptr) ({ \ + unsigned long __gu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\ +}) -#define __put_user(x,ptr) put_user(x,ptr) -#define __get_user(x,ptr) get_user(x,ptr) +#define __put_user(x, ptr) put_user(x, ptr) +#define __get_user(x, ptr) get_user(x, ptr) struct __large_struct { unsigned long buf[100]; }; #define __m(x) ((struct __large_struct *)(x)) -#define __put_user_nocheck(data,addr,size) ({ \ -register int __pu_ret; \ -switch (size) { \ -case 1: __put_user_asm(data,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(data,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(data,w,addr,__pu_ret); break; \ -case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} __pu_ret; }) - -#define __put_user_asm(x,size,addr,ret) \ +#define __put_user_nocheck(data, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: __put_user_asm(data, b, addr, __pu_ret); break; \ + case 2: __put_user_asm(data, h, addr, __pu_ret); break; \ + case 4: __put_user_asm(data, w, addr, __pu_ret); break; \ + case 8: __put_user_asm(data, x, addr, __pu_ret); break; \ + default: __pu_ret = __put_user_bad(); break; \ + } \ + __pu_ret; \ +}) + +#define __put_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Put user asm, inline. 
*/\n" \ -"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "sethi %%hi(2b), %0\n\t" \ - "jmpl %0 + %%lo(2b), %%g0\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\t" \ - ".previous\n\n\t" \ - : "=r" (ret) : "r" (x), "r" (__m(addr)), \ - "i" (-EFAULT)) + "/* Put user asm, inline. */\n" \ + "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "sethi %%hi(2b), %0\n\t" \ + "jmpl %0 + %%lo(2b), %%g0\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\t" \ + ".previous\n\n\t" \ + : "=r" (ret) : "r" (x), "r" (__m(addr)), \ + "i" (-EFAULT)) int __put_user_bad(void); -#define __get_user_nocheck(data,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} data = (type) __gu_val; __gu_ret; }) - -#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} data = (type) __gu_val; }) - -#define __get_user_asm(x,size,addr,ret) \ +#define __get_user_nocheck(data, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ + case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \ + case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + data = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ + case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \ + case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + data = (__force type) __gu_val; \ +}) + +#define __get_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Get user asm, inline. 
*/\n" \ -"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "sethi %%hi(2b), %0\n\t" \ - "clr %1\n\t" \ - "jmpl %0 + %%lo(2b), %%g0\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ - "i" (-EFAULT)) - -#define __get_user_asm_ret(x,size,addr,retval) \ + "/* Get user asm, inline. */\n" \ + "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "sethi %%hi(2b), %0\n\t" \ + "clr %1\n\t" \ + "jmpl %0 + %%lo(2b), %%g0\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ + "i" (-EFAULT)) + +#define __get_user_asm_ret(x, size, addr, retval) \ if (__builtin_constant_p(retval) && retval == -EFAULT) \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b,__ret_efault\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr))); \ + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b,__ret_efault\n\n\t" \ + ".previous\n\t" \ + : "=r" (x) : "r" (__m(addr))); \ else \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %2, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr)), "i" (retval)) + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "ret\n\t" \ + " restore %%g0, %2, %%o0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=r" (x) : "r" (__m(addr)), "i" (retval)) int __get_user_bad(void); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index eb1cf898ed3c..c2fb8a87dccb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -488,6 +488,22 @@ config X86_INTEL_MID Intel MID platforms are based on an Intel processor and chipset which consume less power than most of the x86 derivatives. +config X86_INTEL_QUARK + bool "Intel Quark platform support" + depends on X86_32 + depends on X86_EXTENDED_PLATFORM + depends on X86_PLATFORM_DEVICES + depends on X86_TSC + depends on PCI + depends on PCI_GOANY + depends on X86_IO_APIC + select IOSF_MBI + select INTEL_IMR + ---help--- + Select to include support for Quark X1000 SoC. + Say Y here if you have a Quark based system such as the Arduino + compatible Intel Galileo. + config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" depends on ACPI diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 61bd2ad94281..20028da8ae18 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST If unsure, say N. 
+config DEBUG_IMR_SELFTEST + bool "Isolated Memory Region self test" + default n + depends on INTEL_IMR + ---help--- + This option enables automated sanity testing of the IMR code. + Some simple tests are run to verify IMR bounds checking, alignment + and overlapping. This option is really only useful if you are + debugging an IMR memory map or are modifying the IMR code and want to + test your changes. + + If unsure say N here. + config X86_DEBUG_STATIC_CPU_HAS bool "Debug alternatives" depends on DEBUG_KERNEL diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 36b62bc52638..95eba554baf9 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -30,7 +30,7 @@ cflags-y += -ffreestanding # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use # a lot more stack due to the lack of sharing of stacklots. Also, gcc # 4.3.0 needs -funit-at-a-time for extern inline functions. -KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \ +KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \ echo $(call cc-option,-fno-unit-at-a-time); \ else echo $(call cc-option,-funit-at-a-time); fi ;) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 843feb3eb20b..0a291cdfaf77 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -51,6 +51,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ $(objtree)/drivers/firmware/efi/libstub/lib.a +vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o $(obj)/vmlinux: $(vmlinux-objs-y) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index bb1376381985..7083c16cccba 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -14,6 +14,13 @@ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; +struct kaslr_setup_data { + __u64 next; + __u32 type; + __u32 len; + __u8 data[1]; +} kaslr_setup_data; + #define I8254_PORT_CONTROL 0x43 #define I8254_PORT_COUNTER0 0x40 #define I8254_CMD_READBACK 0xC0 @@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum, return slots_fetch_random(); } -unsigned char *choose_kernel_location(unsigned char *input, +static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) +{ + struct setup_data *data; + + kaslr_setup_data.type = SETUP_KASLR; + kaslr_setup_data.len = 1; + kaslr_setup_data.next = 0; + kaslr_setup_data.data[0] = enabled; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + if (data) + data->next = (unsigned long)&kaslr_setup_data; + else + params->hdr.setup_data = (unsigned long)&kaslr_setup_data; + +} + +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) @@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input, #ifdef CONFIG_HIBERNATION if (!cmdline_find_option_bool("kaslr")) { debug_putstr("KASLR disabled by default...\n"); + add_kaslr_setup_data(params, 0); goto out; } #else if (cmdline_find_option_bool("nokaslr")) { debug_putstr("KASLR disabled by cmdline...\n"); + add_kaslr_setup_data(params, 0); goto out; } #endif + add_kaslr_setup_data(params, 1); /* Record the 
various known unsafe memory ranges. */ mem_avoid_init((unsigned long)input, input_size, diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S index 7ff3632806b1..99494dff2113 100644 --- a/arch/x86/boot/compressed/efi_stub_64.S +++ b/arch/x86/boot/compressed/efi_stub_64.S @@ -3,28 +3,3 @@ #include <asm/processor-flags.h> #include "../../platform/efi/efi_stub_64.S" - -#ifdef CONFIG_EFI_MIXED - .code64 - .text -ENTRY(efi64_thunk) - push %rbp - push %rbx - - subq $16, %rsp - leaq efi_exit32(%rip), %rax - movl %eax, 8(%rsp) - leaq efi_gdt64(%rip), %rax - movl %eax, 4(%rsp) - movl %eax, 2(%rax) /* Fixup the gdt base address */ - leaq efi32_boot_gdt(%rip), %rax - movl %eax, (%rsp) - - call __efi64_thunk - - addq $16, %rsp - pop %rbx - pop %rbp - ret -ENDPROC(efi64_thunk) -#endif /* CONFIG_EFI_MIXED */ diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S new file mode 100644 index 000000000000..630384a4c14a --- /dev/null +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming + * + * Early support for invoking 32-bit EFI services from a 64-bit kernel. + * + * Because this thunking occurs before ExitBootServices() we have to + * restore the firmware's 32-bit GDT before we make EFI serivce calls, + * since the firmware's 32-bit IDT is still currently installed and it + * needs to be able to service interrupts. + * + * On the plus side, we don't have to worry about mangling 64-bit + * addresses into 32-bits because we're executing with an identify + * mapped pagetable and haven't transitioned to 64-bit virtual addresses + * yet. + */ + +#include <linux/linkage.h> +#include <asm/msr.h> +#include <asm/page_types.h> +#include <asm/processor-flags.h> +#include <asm/segment.h> + + .code64 + .text +ENTRY(efi64_thunk) + push %rbp + push %rbx + + subq $8, %rsp + leaq efi_exit32(%rip), %rax + movl %eax, 4(%rsp) + leaq efi_gdt64(%rip), %rax + movl %eax, (%rsp) + movl %eax, 2(%rax) /* Fixup the gdt base address */ + + movl %ds, %eax + push %rax + movl %es, %eax + push %rax + movl %ss, %eax + push %rax + + /* + * Convert x86-64 ABI params to i386 ABI + */ + subq $32, %rsp + movl %esi, 0x0(%rsp) + movl %edx, 0x4(%rsp) + movl %ecx, 0x8(%rsp) + movq %r8, %rsi + movl %esi, 0xc(%rsp) + movq %r9, %rsi + movl %esi, 0x10(%rsp) + + sgdt save_gdt(%rip) + + leaq 1f(%rip), %rbx + movq %rbx, func_rt_ptr(%rip) + + /* + * Switch to gdt with 32-bit segments. This is the firmware GDT + * that was installed when the kernel started executing. This + * pointer was saved at the EFI stub entry point in head_64.S. + */ + leaq efi32_boot_gdt(%rip), %rax + lgdt (%rax) + + pushq $__KERNEL_CS + leaq efi_enter32(%rip), %rax + pushq %rax + lretq + +1: addq $32, %rsp + + lgdt save_gdt(%rip) + + pop %rbx + movl %ebx, %ss + pop %rbx + movl %ebx, %es + pop %rbx + movl %ebx, %ds + + /* + * Convert 32-bit status code into 64-bit. + */ + test %rax, %rax + jz 1f + movl %eax, %ecx + andl $0x0fffffff, %ecx + andl $0xf0000000, %eax + shl $32, %rax + or %rcx, %rax +1: + addq $8, %rsp + pop %rbx + pop %rbp + ret +ENDPROC(efi64_thunk) + +ENTRY(efi_exit32) + movq func_rt_ptr(%rip), %rax + push %rax + mov %rdi, %rax + ret +ENDPROC(efi_exit32) + + .code32 +/* + * EFI service pointer must be in %edi. + * + * The stack should represent the 32-bit calling convention. 
+ */ +ENTRY(efi_enter32) + movl $__KERNEL_DS, %eax + movl %eax, %ds + movl %eax, %es + movl %eax, %ss + + /* Reload pgtables */ + movl %cr3, %eax + movl %eax, %cr3 + + /* Disable paging */ + movl %cr0, %eax + btrl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + + /* Disable long mode via EFER */ + movl $MSR_EFER, %ecx + rdmsr + btrl $_EFER_LME, %eax + wrmsr + + call *%edi + + /* We must preserve return value */ + movl %eax, %edi + + /* + * Some firmware will return with interrupts enabled. Be sure to + * disable them before we switch GDTs. + */ + cli + + movl 56(%esp), %eax + movl %eax, 2(%eax) + lgdtl (%eax) + + movl %cr4, %eax + btsl $(X86_CR4_PAE_BIT), %eax + movl %eax, %cr4 + + movl %cr3, %eax + movl %eax, %cr3 + + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_LME, %eax + wrmsr + + xorl %eax, %eax + lldt %ax + + movl 60(%esp), %eax + pushl $__KERNEL_CS + pushl %eax + + /* Enable paging */ + movl %cr0, %eax + btsl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + lret +ENDPROC(efi_enter32) + + .data + .balign 8 + .global efi32_boot_gdt +efi32_boot_gdt: .word 0 + .quad 0 + +save_gdt: .word 0 + .quad 0 +func_rt_ptr: .quad 0 + + .global efi_gdt64 +efi_gdt64: + .word efi_gdt64_end - efi_gdt64 + .long 0 /* Filled out by user */ + .word 0 + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x00af9a000000ffff /* __KERNEL_CS */ + .quad 0x00cf92000000ffff /* __KERNEL_DS */ + .quad 0x0080890000000000 /* TS descriptor */ + .quad 0x0000000000000000 /* TS continued */ +efi_gdt64_end: diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index a950864a64da..5903089c818f 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, * the entire decompressed kernel plus relocation table, or the * entire decompressed kernel plus .bss and .brk sections. */ - output = choose_kernel_location(input_data, input_len, output, + output = choose_kernel_location(real_mode, input_data, input_len, + output, output_len > run_size ? 
output_len : run_size); diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 04477d68403f..ee3576b2666b 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option); #if CONFIG_RANDOMIZE_BASE /* aslr.c */ -unsigned char *choose_kernel_location(unsigned char *input, +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size); @@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input, bool has_cpuflag(int flag); #else static inline -unsigned char *choose_kernel_location(unsigned char *input, +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 92003f3c8a42..efc3b22d896e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); extern int APIC_init_uniprocessor(void); + +#ifdef CONFIG_X86_64 +static inline int apic_force_enable(unsigned long addr) +{ + return -1; +} +#else extern int apic_force_enable(unsigned long addr); +#endif extern int apic_bsp_setup(bool upmode); extern void apic_ap_setup(void); diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h new file mode 100644 index 000000000000..cd2ce4068441 --- /dev/null +++ b/arch/x86/include/asm/imr.h @@ -0,0 +1,60 @@ +/* + * imr.h: Isolated Memory Region API + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _IMR_H +#define _IMR_H + +#include <linux/types.h> + +/* + * IMR agent access mask bits + * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register + * definitions. + */ +#define IMR_ESRAM_FLUSH BIT(31) +#define IMR_CPU_SNOOP BIT(30) /* Applicable only to write */ +#define IMR_RMU BIT(29) +#define IMR_VC1_SAI_ID3 BIT(15) +#define IMR_VC1_SAI_ID2 BIT(14) +#define IMR_VC1_SAI_ID1 BIT(13) +#define IMR_VC1_SAI_ID0 BIT(12) +#define IMR_VC0_SAI_ID3 BIT(11) +#define IMR_VC0_SAI_ID2 BIT(10) +#define IMR_VC0_SAI_ID1 BIT(9) +#define IMR_VC0_SAI_ID0 BIT(8) +#define IMR_CPU_0 BIT(1) /* SMM mode */ +#define IMR_CPU BIT(0) /* Non SMM mode */ +#define IMR_ACCESS_NONE 0 + +/* + * Read/Write access-all bits here include some reserved bits + * These are the values firmware uses and are accepted by hardware. + * The kernel defines read/write access-all in the same way as firmware + * in order to have a consistent and crisp definition across firmware, + * bootloader and kernel. 
+ */ +#define IMR_READ_ACCESS_ALL 0xBFFFFFFF +#define IMR_WRITE_ACCESS_ALL 0xFFFFFFFF + +/* Number of IMRs provided by Quark X1000 SoC */ +#define QUARK_X1000_IMR_MAX 0x08 +#define QUARK_X1000_IMR_REGBASE 0x40 + +/* IMR alignment bits - only bits 31:10 are checked for IMR validity */ +#define IMR_ALIGN 0x400 +#define IMR_MASK (IMR_ALIGN - 1) + +int imr_add_range(phys_addr_t base, size_t size, + unsigned int rmask, unsigned int wmask, bool lock); + +int imr_remove_range(phys_addr_t base, size_t size); + +#endif /* _IMR_H */ diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 879fd7d33877..ef01fef3eebc 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -16,7 +16,6 @@ #define LHCALL_SET_PTE 14 #define LHCALL_SET_PGD 15 #define LHCALL_LOAD_TLS 16 -#define LHCALL_NOTIFY 17 #define LHCALL_LOAD_GDT_ENTRY 18 #define LHCALL_SEND_INTERRUPTS 19 diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index f97fbe3abb67..95e11f79f123 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -51,6 +51,8 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; +extern bool kaslr_enabled; + static inline phys_addr_t get_max_mapped(void) { return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 67fc3d2b0aab..a0c35bf6cb92 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd) */ static inline int pte_protnone(pte_t pte) { - return pte_flags(pte) & _PAGE_PROTNONE; + return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; } static inline int pmd_protnone(pmd_t pmd) { - return pmd_flags(pmd) & _PAGE_PROTNONE; + return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; } #endif /* CONFIG_NUMA_BALANCING */ diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 7050d864f520..cf87de3fc390 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key); static inline void __ticket_enter_slowpath(arch_spinlock_t *lock) { - set_bit(0, (volatile unsigned long *)&lock->tickets.tail); + set_bit(0, (volatile unsigned long *)&lock->tickets.head); } #else /* !CONFIG_PARAVIRT_SPINLOCKS */ @@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock, } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ +static inline int __tickets_equal(__ticket_t one, __ticket_t two) +{ + return !((one ^ two) & ~TICKET_SLOWPATH_FLAG); +} + +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock, + __ticket_t head) +{ + if (head & TICKET_SLOWPATH_FLAG) { + arch_spinlock_t old, new; + + old.tickets.head = head; + new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; + old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; + new.tickets.tail = old.tickets.tail; + + /* try to clear slowpath flag when there are no contenders */ + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - return lock.tickets.head == lock.tickets.tail; + return __tickets_equal(lock.tickets.head, lock.tickets.tail); } /* @@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t 
*lock) if (likely(inc.head == inc.tail)) goto out; - inc.tail &= ~TICKET_SLOWPATH_FLAG; for (;;) { unsigned count = SPIN_THRESHOLD; do { - if (READ_ONCE(lock->tickets.head) == inc.tail) - goto out; + inc.head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(inc.head, inc.tail)) + goto clear_slowpath; cpu_relax(); } while (--count); __ticket_lock_spinning(lock, inc.tail); } -out: barrier(); /* make sure nothing creeps before the lock is taken */ +clear_slowpath: + __ticket_check_and_clear_slowpath(lock, inc.head); +out: + barrier(); /* make sure nothing creeps before the lock is taken */ } static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spinlock_t old, new; old.tickets = READ_ONCE(lock->tickets); - if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG)) + if (!__tickets_equal(old.tickets.head, old.tickets.tail)) return 0; new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT); + new.head_tail &= ~TICKET_SLOWPATH_FLAG; /* cmpxchg is a full barrier, so nothing can move before it */ return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; } -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock, - arch_spinlock_t old) -{ - arch_spinlock_t new; - - BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS); - - /* Perform the unlock on the "before" copy */ - old.tickets.head += TICKET_LOCK_INC; - - /* Clear the slowpath flag */ - new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT); - - /* - * If the lock is uncontended, clear the flag - use cmpxchg in - * case it changes behind our back though. - */ - if (new.tickets.head != new.tickets.tail || - cmpxchg(&lock->head_tail, old.head_tail, - new.head_tail) != old.head_tail) { - /* - * Lock still has someone queued for it, so wake up an - * appropriate waiter. - */ - __ticket_unlock_kick(lock, old.tickets.head); - } -} - static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { if (TICKET_SLOWPATH_FLAG && - static_key_false(¶virt_ticketlocks_enabled)) { - arch_spinlock_t prev; + static_key_false(¶virt_ticketlocks_enabled)) { + __ticket_t head; - prev = *lock; - add_smp(&lock->tickets.head, TICKET_LOCK_INC); + BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS); - /* add_smp() is a full mb() */ + head = xadd(&lock->tickets.head, TICKET_LOCK_INC); - if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) - __ticket_unlock_slowpath(lock, prev); + if (unlikely(head & TICKET_SLOWPATH_FLAG)) { + head &= ~TICKET_SLOWPATH_FLAG; + __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC)); + } } else __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); } @@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock) { struct __raw_tickets tmp = READ_ONCE(lock->tickets); - return tmp.tail != tmp.head; + return !__tickets_equal(tmp.tail, tmp.head); } static inline int arch_spin_is_contended(arch_spinlock_t *lock) { struct __raw_tickets tmp = READ_ONCE(lock->tickets); - return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; + tmp.head &= ~TICKET_SLOWPATH_FLAG; + return (tmp.tail - tmp.head) > TICKET_LOCK_INC; } #define arch_spin_is_contended arch_spin_is_contended @@ -191,8 +189,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) * We need to check "unlocked" in a loop, tmp.head == head * can be false positive because of overflow. 
*/ - if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) || - tmp.head != head) + if (__tickets_equal(tmp.head, tmp.tail) || + !__tickets_equal(tmp.head, head)) break; cpu_relax(); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0d592e0a5b84..ace9dec050b1 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) asm volatile("call __get_user_%P3" \ : "=a" (__ret_gu), "=r" (__val_gu) \ : "0" (ptr), "i" (sizeof(*(ptr)))); \ - (x) = (__typeof__(*(ptr))) __val_gu; \ + (x) = (__force __typeof__(*(ptr))) __val_gu; \ __ret_gu; \ }) diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 225b0988043a..44e6dd7e36a2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -7,6 +7,7 @@ #define SETUP_DTB 2 #define SETUP_PCI 3 #define SETUP_EFI 4 +#define SETUP_KASLR 5 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ae97ed0873c6..3d525c6124f6 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) { int rc, irq, trigger, polarity; + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + *irqp = gsi; + return 0; + } + rc = acpi_get_override_irq(gsi, &trigger, &polarity); if (rc == 0) { trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b5c8ff5e9dfc..2346c95c6ab1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1396,6 +1396,12 @@ void cpu_init(void) wait_for_master_cpu(cpu); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. + */ + cr4_init_shadow(); + show_ucode_info_early(); printk(KERN_INFO "Initializing CPU#%d\n", cpu); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 94d7dcb12145..50163fa9034f 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, - { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, - { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, + { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, + { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index c6826d1e8082..746e7fd08aad 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, struct microcode_header_intel mc_header; unsigned int mc_size; + if (leftover < sizeof(mc_header)) { + pr_err("error! 
Truncated header in microcode data file\n"); + break; + } + if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header))) break; diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index ec9df6f9cd47..420eb933189c 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start, unsigned int mc_saved_count = mc_saved_data->mc_saved_count; int i; - while (leftover) { + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { + + if (leftover < sizeof(mc_header)) + break; + mc_header = (struct microcode_header_intel *)ucode_ptr; mc_size = get_totalsize(mc_header); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 000d4199b03e..31e2d5bf3e38 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback) ENTRY(xen_do_upcall) 1: mov %esp, %eax call xen_evtchn_do_upcall +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp ret_from_intr CFI_ENDPROC ENDPROC(xen_hypervisor_callback) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index db13655c3a2a..10074ad9ebf8 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1208,6 +1208,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) popq %rsp CFI_DEF_CFA_REGISTER rsp decl PER_CPU_VAR(irq_count) +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp error_exit CFI_ENDPROC END(xen_do_hypervisor_callback) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 705ef8d48e2d..67b1cbe0093a 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void) irq = __this_cpu_read(vector_irq[vector]); if (irq >= 0) { desc = irq_to_desc(irq); + if (!desc) + continue; + data = irq_desc_get_irq_data(desc); cpumask_copy(&affinity_new, data->affinity); cpu_clear(this_cpu, affinity_new); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 98f654d466e5..4e3d5a9621fe 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ - W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ + W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ @@ -223,27 +223,48 @@ static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct kprobe *kp; + unsigned long faddr; kp = get_kprobe((void *)addr); - /* There is no probe, return original address */ - if (!kp) + faddr = ftrace_location(addr); + /* + * Addresses inside the ftrace location are refused by + * arch_check_ftrace_location(). Something went terribly wrong + * if such an address is checked here. + */ + if (WARN_ON(faddr && faddr != addr)) + return 0UL; + /* + * Use the current code if it is not modified by Kprobe + * and it cannot be modified by ftrace. + */ + if (!kp && !faddr) return addr; /* - * Basically, kp->ainsn.insn has an original instruction. 
- * However, RIP-relative instruction can not do single-stepping - * at different place, __copy_instruction() tweaks the displacement of - * that instruction. In that case, we can't recover the instruction - * from the kp->ainsn.insn. + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, __copy_instruction() tweaks the displacement of + * that instruction. In that case, we can't recover the instruction + * from the kp->ainsn.insn. * - * On the other hand, kp->opcode has a copy of the first byte of - * the probed instruction, which is overwritten by int3. And - * the instruction at kp->addr is not modified by kprobes except - * for the first byte, we can recover the original instruction - * from it and kp->opcode. + * On the other hand, in case on normal Kprobe, kp->opcode has a copy + * of the first byte of the probed instruction, which is overwritten + * by int3. And the instruction at kp->addr is not modified by kprobes + * except for the first byte, we can recover the original instruction + * from it and kp->opcode. + * + * In case of Kprobes using ftrace, we do not have a copy of + * the original instruction. In fact, the ftrace location might + * be modified at anytime and even could be in an inconsistent state. + * Fortunately, we know that the original code is the ideal 5-byte + * long NOP. */ - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - buf[0] = kp->opcode; + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (faddr) + memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); + else + buf[0] = kp->opcode; return (unsigned long)buf; } @@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) * Recover the probed instruction at addr for further analysis. * Caller must lock kprobes by kprobe_mutex, or disable preemption * for preventing to release referencing kprobes. + * Returns zero if the instruction can not get recovered. */ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) { @@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr) * normally used, we just go through if there is no kprobe. 
*/ __addr = recover_probed_instruction(buf, addr); + if (!__addr) + return 0; kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); insn_get_length(&insn); @@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src) unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint, failed to recover */ diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 0dd8d089c315..7b3b9d15c47a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr) */ return 0; recovered_insn = recover_probed_instruction(buf, addr); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 94f643484300..e354cc6446ab 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -609,7 +609,7 @@ static inline void check_zero(void) u8 ret; u8 old; - old = ACCESS_ONCE(zero_stats); + old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ @@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) int cpu; u64 start; unsigned long flags; + __ticket_t head; if (in_nmi()) return; @@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) */ __ticket_enter_slowpath(lock); + /* make sure enter_slowpath, which is atomic does not cross the read */ + smp_mb__after_atomic(); + /* * check again make sure it didn't become free while * we weren't looking. */ - if (ACCESS_ONCE(lock->tickets.head) == want) { + head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(head, want)) { add_stats(TAKEN_SLOW_PICKUP, 1); goto out; } @@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket) add_stats(RELEASED_SLOW, 1); for_each_cpu(cpu, &waiting_cpus) { const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); - if (ACCESS_ONCE(w->lock) == lock && - ACCESS_ONCE(w->want) == ticket) { + if (READ_ONCE(w->lock) == lock && + READ_ONCE(w->want) == ticket) { add_stats(RELEASED_SLOW_KICKED, 1); kvm_kick_cpu(cpu); break; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index d1ac80b72c72..9bbb9b35c144 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -47,21 +47,13 @@ do { \ #ifdef CONFIG_RANDOMIZE_BASE static unsigned long module_load_offset; -static int randomize_modules = 1; /* Mutex protects the module_load_offset. 
*/ static DEFINE_MUTEX(module_kaslr_mutex); -static int __init parse_nokaslr(char *p) -{ - randomize_modules = 0; - return 0; -} -early_param("nokaslr", parse_nokaslr); - static unsigned long int get_module_load_offset(void) { - if (randomize_modules) { + if (kaslr_enabled) { mutex_lock(&module_kaslr_mutex); /* * Calculate the module_load_offset the first time this diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0a2421cca01f..98dc9317286e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -122,6 +122,8 @@ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; +bool __read_mostly kaslr_enabled = false; + #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif @@ -425,6 +427,11 @@ static void __init reserve_initrd(void) } #endif /* CONFIG_BLK_DEV_INITRD */ +static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) +{ + kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); +} + static void __init parse_setup_data(void) { struct setup_data *data; @@ -450,6 +457,9 @@ static void __init parse_setup_data(void) case SETUP_EFI: parse_efi_setup(pa_data, data_len); break; + case SETUP_KASLR: + parse_kaslr_setup(pa_data, data_len); + break; default: break; } @@ -832,10 +842,14 @@ static void __init trim_low_memory_range(void) static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { - pr_emerg("Kernel Offset: 0x%lx from 0x%lx " - "(relocation range: 0x%lx-0x%lx)\n", - (unsigned long)&_text - __START_KERNEL, __START_KERNEL, - __START_KERNEL_map, MODULES_VADDR-1); + if (kaslr_enabled) + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, + __START_KERNEL, + __START_KERNEL_map, + MODULES_VADDR-1); + else + pr_emerg("Kernel Offset: disabled\n"); return 0; } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 8b96a947021f..81f8adb0679e 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -66,27 +66,54 @@ * Good-instruction tables for 32-bit apps. This is non-const and volatile * to keep gcc from statically optimizing it out, as variable_test_bit makes * some versions of gcc to think only *(unsigned long*) is used. + * + * Opcodes we'll probably never support: + * 6c-6f - ins,outs. SEGVs if used in userspace + * e4-e7 - in,out imm. SEGVs if used in userspace + * ec-ef - in,out acc. SEGVs if used in userspace + * cc - int3. SIGTRAP if used in userspace + * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs + * (why we support bound (62) then? it's similar, and similarly unused...) + * f1 - int1. SIGTRAP if used in userspace + * f4 - hlt. SEGVs if used in userspace + * fa - cli. SEGVs if used in userspace + * fb - sti. SEGVs if used in userspace + * + * Opcodes which need some work to be supported: + * 07,17,1f - pop es/ss/ds + * Normally not used in userspace, but would execute if used. + * Can cause GP or stack exception if tries to load wrong segment descriptor. + * We hesitate to run them under single step since kernel's handling + * of userspace single-stepping (TF flag) is fragile. + * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e) + * on the same grounds that they are never used. + * cd - int N. + * Used by userspace for "int 80" syscall entry. (Other "int N" + * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3). + * Not supported since kernel's handling of userspace single-stepping + * (TF flag) is fragile. + * cf - iret. 
Normally not used in userspace. Doesn't SEGV unless arguments are bad */ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static volatile u32 good_insns_32[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */ + W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */ W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */ - W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ + W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ - W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ + W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; @@ -94,27 +121,61 @@ static volatile u32 good_insns_32[256 / 32] = { #define good_insns_32 NULL #endif -/* Good-instruction tables for 64-bit apps */ +/* Good-instruction tables for 64-bit apps. + * + * Genuinely invalid opcodes: + * 06,07 - formerly push/pop es + * 0e - formerly push cs + * 16,17 - formerly push/pop ss + * 1e,1f - formerly push/pop ds + * 27,2f,37,3f - formerly daa/das/aaa/aas + * 60,61 - formerly pusha/popa + * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported) + * 82 - formerly redundant encoding of Group1 + * 9a - formerly call seg:ofs + * ce - formerly into + * d4,d5 - formerly aam/aad + * d6 - formerly undocumented salc + * ea - formerly jmp seg:ofs + * + * Opcodes we'll probably never support: + * 6c-6f - ins,outs. SEGVs if used in userspace + * e4-e7 - in,out imm. SEGVs if used in userspace + * ec-ef - in,out acc. SEGVs if used in userspace + * cc - int3. SIGTRAP if used in userspace + * f1 - int1. SIGTRAP if used in userspace + * f4 - hlt. SEGVs if used in userspace + * fa - cli. SEGVs if used in userspace + * fb - sti. SEGVs if used in userspace + * + * Opcodes which need some work to be supported: + * cd - int N. + * Used by userspace for "int 80" syscall entry. (Other "int N" + * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3). + * Not supported since kernel's handling of userspace single-stepping + * (TF flag) is fragile. + * cf - iret. Normally not used in userspace. 
Doesn't SEGV unless arguments are bad + */ #if defined(CONFIG_X86_64) static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */ + W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */ W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */ - W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */ - W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */ + W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */ + W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ + W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ + W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ - W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ + W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ + W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */ + W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; @@ -122,49 +183,55 @@ static volatile u32 good_insns_64[256 / 32] = { #define good_insns_64 NULL #endif -/* Using this for both 64-bit and 32-bit apps */ +/* Using this for both 64-bit and 32-bit apps. + * Opcodes we don't support: + * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns + * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group. + * Also encodes tons of other system insns if mod=11. + * Some are in fact non-system: xend, xtest, rdtscp, maybe more + * 0f 05 - syscall + * 0f 06 - clts (CPL0 insn) + * 0f 07 - sysret + * 0f 08 - invd (CPL0 insn) + * 0f 09 - wbinvd (CPL0 insn) + * 0f 0b - ud2 + * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?) + * 0f 34 - sysenter + * 0f 35 - sysexit + * 0f 37 - getsec + * 0f 78 - vmread (Intel VMX. CPL0 insn) + * 0f 79 - vmwrite (Intel VMX. CPL0 insn) + * Note: with prefixes, these two opcodes are + * extrq/insertq/AVX512 convert vector ops. + * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt], + * {rd,wr}{fs,gs}base,{s,l,m}fence. + * Why? They are all user-executable. 
+ */ static volatile u32 good_2byte_insns[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ - W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ + W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */ + W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ + W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ - W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ + W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ + W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ - W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ - W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ + W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; #undef W /* - * opcodes we'll probably never support: - * - * 6c-6d, e4-e5, ec-ed - in - * 6e-6f, e6-e7, ee-ef - out - * cc, cd - int3, int - * cf - iret - * d6 - illegal instruction - * f1 - int1/icebp - * f4 - hlt - * fa, fb - cli, sti - * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2 - * - * invalid opcodes in 64-bit mode: - * - * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5 - * 63 - we support this opcode in x86_64 but not in i386. - * * opcodes we may need to refine support for: * * 0f - 2-byte instructions: For many of these instructions, the validity diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 4a0890f815c4..08f41caada45 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST_GUEST bool "Lguest guest support" - depends on X86_32 && PARAVIRT + depends on X86_32 && PARAVIRT && PCI select TTY select VIRTUALIZATION select VIRTIO @@ -8,7 +8,7 @@ config LGUEST_GUEST help Lguest is a tiny in-kernel hypervisor. Selecting this will allow your kernel to boot under lguest. This option will increase - your kernel size by about 6k. If in doubt, say N. + your kernel size by about 10k. If in doubt, say N. If you say Y here, make sure you say Y (or M) to the virtio block and net drivers which lguest needs. 
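For readers unfamiliar with the W()-packed tables touched in the uprobes hunks above: each good_insns_32/good_insns_64/good_2byte_insns array stores one validity bit per opcode in eight 32-bit words, and the uprobes code consults it with test_bit()/variable_test_bit(). The standalone C sketch below is illustrative only, not kernel code, and its demo table contents are invented; it just shows the same word/bit indexing (word = opcode / 32, bit = opcode % 32).

#include <stdint.h>
#include <stdio.h>

static int opcode_is_good(const uint32_t table[8], uint8_t opcode)
{
	/* Same indexing idea as the kernel tables: word opcode/32, bit opcode%32. */
	return (table[opcode >> 5] >> (opcode & 31)) & 1;
}

int main(void)
{
	/* Invented demo table: mark only nop (0x90) and push %rax (0x50) as "good". */
	uint32_t demo[8] = { 0 };

	demo[0x90 >> 5] |= 1u << (0x90 & 31);
	demo[0x50 >> 5] |= 1u << (0x50 & 31);

	/* hlt (0xf4) stays unset, mirroring how the real tables refuse it. */
	printf("0x90 -> %d, 0x50 -> %d, 0xf4 -> %d\n",
	       opcode_is_good(demo, 0x90),
	       opcode_is_good(demo, 0x50),
	       opcode_is_good(demo, 0xf4));
	return 0;
}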
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index c1c1544b8485..ac4453d8520e 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -56,6 +56,9 @@ #include <linux/virtio_console.h> #include <linux/pm.h> #include <linux/export.h> +#include <linux/pci.h> +#include <linux/virtio_pci.h> +#include <asm/acpi.h> #include <asm/apic.h> #include <asm/lguest.h> #include <asm/paravirt.h> @@ -71,6 +74,8 @@ #include <asm/stackprotector.h> #include <asm/reboot.h> /* for struct machine_ops */ #include <asm/kvm_para.h> +#include <asm/pci_x86.h> +#include <asm/pci-direct.h> /*G:010 * Welcome to the Guest! @@ -831,6 +836,24 @@ static struct irq_chip lguest_irq_controller = { .irq_unmask = enable_lguest_irq, }; +static int lguest_enable_irq(struct pci_dev *dev) +{ + u8 line = 0; + + /* We literally use the PCI interrupt line as the irq number. */ + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); + irq_set_chip_and_handler_name(line, &lguest_irq_controller, + handle_level_irq, "level"); + dev->irq = line; + return 0; +} + +/* We don't do hotplug PCI, so this shouldn't be called. */ +static void lguest_disable_irq(struct pci_dev *dev) +{ + WARN_ON(1); +} + /* * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware * interrupt (except 128, which is used for system calls), and then tells the @@ -1181,25 +1204,136 @@ static __init char *lguest_memory_setup(void) return "LGUEST"; } +/* Offset within PCI config space of BAR access capability. */ +static int console_cfg_offset = 0; +static int console_access_cap; + +/* Set up so that we access off in bar0 (on bus 0, device 1, function 0) */ +static void set_cfg_window(u32 cfg_offset, u32 off) +{ + write_pci_config_byte(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, bar), + 0); + write_pci_config(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, length), + 4); + write_pci_config(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, offset), + off); +} + +static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val) +{ + /* + * We could set this up once, then leave it; nothing else in the * + * kernel should touch these registers. But if it went wrong, that + * would be a horrible bug to find. + */ + set_cfg_window(cfg_offset, off); + write_pci_config(0, 1, 0, + cfg_offset + sizeof(struct virtio_pci_cap), val); +} + +static void probe_pci_console(void) +{ + u8 cap, common_cap = 0, device_cap = 0; + /* Offset within BAR0 */ + u32 device_offset; + u32 device_len; + + /* Avoid recursive printk into here. */ + console_cfg_offset = -1; + + if (!early_pci_allowed()) { + printk(KERN_ERR "lguest: early PCI access not allowed!\n"); + return; + } + + /* We expect a console PCI device at BUS0, slot 1. 
*/ + if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) { + printk(KERN_ERR "lguest: PCI device is %#x!\n", + read_pci_config(0, 1, 0, 0)); + return; + } + + /* Find the capabilities we need (must be in bar0) */ + cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST); + while (cap) { + u8 vndr = read_pci_config_byte(0, 1, 0, cap); + if (vndr == PCI_CAP_ID_VNDR) { + u8 type, bar; + u32 offset, length; + + type = read_pci_config_byte(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, cfg_type)); + bar = read_pci_config_byte(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, bar)); + offset = read_pci_config(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, offset)); + length = read_pci_config(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, length)); + + switch (type) { + case VIRTIO_PCI_CAP_DEVICE_CFG: + if (bar == 0) { + device_cap = cap; + device_offset = offset; + device_len = length; + } + break; + case VIRTIO_PCI_CAP_PCI_CFG: + console_access_cap = cap; + break; + } + } + cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT); + } + if (!device_cap || !console_access_cap) { + printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n", + common_cap, device_cap, console_access_cap); + return; + } + + /* + * Note that we can't check features, until we've set the DRIVER + * status bit. We don't want to do that until we have a real driver, + * so we just check that the device-specific config has room for + * emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE + * it should ignore the access. + */ + if (device_len < (offsetof(struct virtio_console_config, emerg_wr) + + sizeof(u32))) { + printk(KERN_ERR "lguest: console missing emerg_wr field\n"); + return; + } + + console_cfg_offset = device_offset; + printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n"); +} + /* * We will eventually use the virtio console device to produce console output, - * but before that is set up we use LHCALL_NOTIFY on normal memory to produce - * console output. + * but before that is set up we use the virtio PCI console's backdoor mmio + * access and the "emergency" write facility (which is legal even before the + * device is configured). */ static __init int early_put_chars(u32 vtermno, const char *buf, int count) { - char scratch[17]; - unsigned int len = count; + /* If we couldn't find PCI console, forget it. */ + if (console_cfg_offset < 0) + return count; - /* We use a nul-terminated string, so we make a copy. Icky, huh? */ - if (len > sizeof(scratch) - 1) - len = sizeof(scratch) - 1; - scratch[len] = '\0'; - memcpy(scratch, buf, len); - hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); + if (unlikely(!console_cfg_offset)) { + probe_pci_console(); + if (console_cfg_offset < 0) + return count; + } - /* This routine returns the number of bytes actually written. */ - return len; + write_bar_via_cfg(console_access_cap, + console_cfg_offset + + offsetof(struct virtio_console_config, emerg_wr), + buf[0]); + return 1; } /* @@ -1400,14 +1534,6 @@ __init void lguest_init(void) atomic_notifier_chain_register(&panic_notifier_list, &paniced); /* - * The IDE code spends about 3 seconds probing for disks: if we reserve - * all the I/O ports up front it can't get them and so doesn't probe. - * Other device drivers are similar (but less severe). This cuts the - * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. 
- */ - paravirt_disable_iospace(); - - /* * This is messy CPU setup stuff which the native boot code does before * start_kernel, so we have to do, too: */ @@ -1436,6 +1562,13 @@ __init void lguest_init(void) /* Register our very early console. */ virtio_cons_early_init(early_put_chars); + /* Don't let ACPI try to control our PCI interrupts. */ + disable_acpi(); + + /* We control them ourselves, by overriding these two hooks. */ + pcibios_enable_irq = lguest_enable_irq; + pcibios_disable_irq = lguest_disable_irq; + /* * Last of all, we set the power management poweroff hook to point to * the Guest routine to power off, and the reboot hook to our restart diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 553c094b9cd7..a110efca6d06 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr, } } +static const char *page_size_string(struct map_range *mr) +{ + static const char str_1g[] = "1G"; + static const char str_2m[] = "2M"; + static const char str_4m[] = "4M"; + static const char str_4k[] = "4k"; + + if (mr->page_size_mask & (1<<PG_LEVEL_1G)) + return str_1g; + /* + * 32-bit without PAE has a 4M large page size. + * PG_LEVEL_2M is misnamed, but we can at least + * print out the right size in the string. + */ + if (IS_ENABLED(CONFIG_X86_32) && + !IS_ENABLED(CONFIG_X86_PAE) && + mr->page_size_mask & (1<<PG_LEVEL_2M)) + return str_4m; + + if (mr->page_size_mask & (1<<PG_LEVEL_2M)) + return str_2m; + + return str_4k; +} + static int __meminit split_mem_range(struct map_range *mr, int nr_range, unsigned long start, unsigned long end) @@ -333,8 +358,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, for (i = 0; i < nr_range; i++) printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, - (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( - (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); + page_size_string(&mr[i])); return nr_range; } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 919b91205cd4..df4552bd239e 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = { .flags = -1, }; -static unsigned int stack_maxrandom_size(void) +static unsigned long stack_maxrandom_size(void) { - unsigned int max = 0; + unsigned long max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; + max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; } return max; diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index 85afde1fa3e5..a62e0be3a2f1 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile @@ -5,6 +5,7 @@ obj-y += geode/ obj-y += goldfish/ obj-y += iris/ obj-y += intel-mid/ +obj-y += intel-quark/ obj-y += olpc/ obj-y += scx200/ obj-y += sfi/ diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 5fcda7272550..86d0f9e08dd9 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -91,167 +91,6 @@ ENTRY(efi_call) ret ENDPROC(efi_call) -#ifdef CONFIG_EFI_MIXED - -/* - * We run this function from the 1:1 mapping. - * - * This function must be invoked with a 1:1 mapped stack. 
- */ -ENTRY(__efi64_thunk) - movl %ds, %eax - push %rax - movl %es, %eax - push %rax - movl %ss, %eax - push %rax - - subq $32, %rsp - movl %esi, 0x0(%rsp) - movl %edx, 0x4(%rsp) - movl %ecx, 0x8(%rsp) - movq %r8, %rsi - movl %esi, 0xc(%rsp) - movq %r9, %rsi - movl %esi, 0x10(%rsp) - - sgdt save_gdt(%rip) - - leaq 1f(%rip), %rbx - movq %rbx, func_rt_ptr(%rip) - - /* Switch to gdt with 32-bit segments */ - movl 64(%rsp), %eax - lgdt (%rax) - - leaq efi_enter32(%rip), %rax - pushq $__KERNEL_CS - pushq %rax - lretq - -1: addq $32, %rsp - - lgdt save_gdt(%rip) - - pop %rbx - movl %ebx, %ss - pop %rbx - movl %ebx, %es - pop %rbx - movl %ebx, %ds - - /* - * Convert 32-bit status code into 64-bit. - */ - test %rax, %rax - jz 1f - movl %eax, %ecx - andl $0x0fffffff, %ecx - andl $0xf0000000, %eax - shl $32, %rax - or %rcx, %rax -1: - ret -ENDPROC(__efi64_thunk) - -ENTRY(efi_exit32) - movq func_rt_ptr(%rip), %rax - push %rax - mov %rdi, %rax - ret -ENDPROC(efi_exit32) - - .code32 -/* - * EFI service pointer must be in %edi. - * - * The stack should represent the 32-bit calling convention. - */ -ENTRY(efi_enter32) - movl $__KERNEL_DS, %eax - movl %eax, %ds - movl %eax, %es - movl %eax, %ss - - /* Reload pgtables */ - movl %cr3, %eax - movl %eax, %cr3 - - /* Disable paging */ - movl %cr0, %eax - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - - /* Disable long mode via EFER */ - movl $MSR_EFER, %ecx - rdmsr - btrl $_EFER_LME, %eax - wrmsr - - call *%edi - - /* We must preserve return value */ - movl %eax, %edi - - /* - * Some firmware will return with interrupts enabled. Be sure to - * disable them before we switch GDTs. - */ - cli - - movl 68(%esp), %eax - movl %eax, 2(%eax) - lgdtl (%eax) - - movl %cr4, %eax - btsl $(X86_CR4_PAE_BIT), %eax - movl %eax, %cr4 - - movl %cr3, %eax - movl %eax, %cr3 - - movl $MSR_EFER, %ecx - rdmsr - btsl $_EFER_LME, %eax - wrmsr - - xorl %eax, %eax - lldt %ax - - movl 72(%esp), %eax - pushl $__KERNEL_CS - pushl %eax - - /* Enable paging */ - movl %cr0, %eax - btsl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - lret -ENDPROC(efi_enter32) - - .data - .balign 8 - .global efi32_boot_gdt -efi32_boot_gdt: .word 0 - .quad 0 - -save_gdt: .word 0 - .quad 0 -func_rt_ptr: .quad 0 - - .global efi_gdt64 -efi_gdt64: - .word efi_gdt64_end - efi_gdt64 - .long 0 /* Filled out by user */ - .word 0 - .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00af9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf92000000ffff /* __KERNEL_DS */ - .quad 0x0080890000000000 /* TS descriptor */ - .quad 0x0000000000000000 /* TS continued */ -efi_gdt64_end: -#endif /* CONFIG_EFI_MIXED */ - .data ENTRY(efi_scratch) .fill 3,8,0 diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 8806fa73e6e6..ff85d28c50f2 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -1,9 +1,26 @@ /* * Copyright (C) 2014 Intel Corporation; author Matt Fleming + * + * Support for invoking 32-bit EFI runtime services from a 64-bit + * kernel. + * + * The below thunking functions are only used after ExitBootServices() + * has been called. This simplifies things considerably as compared with + * the early EFI thunking because we can leave all the kernel state + * intact (GDT, IDT, etc) and simply invoke the the 32-bit EFI runtime + * services from __KERNEL32_CS. This means we can continue to service + * interrupts across an EFI mixed mode call. + * + * We do however, need to handle the fact that we're running in a full + * 64-bit virtual address space. 
Things like the stack and instruction + * addresses need to be accessible by the 32-bit firmware, so we rely on + * using the identity mappings in the EFI page table to access the stack + * and kernel text (see efi_setup_page_tables()). */ #include <linux/linkage.h> #include <asm/page_types.h> +#include <asm/segment.h> .text .code64 @@ -33,14 +50,6 @@ ENTRY(efi64_thunk) leaq efi_exit32(%rip), %rbx subq %rax, %rbx movl %ebx, 8(%rsp) - leaq efi_gdt64(%rip), %rbx - subq %rax, %rbx - movl %ebx, 2(%ebx) - movl %ebx, 4(%rsp) - leaq efi_gdt32(%rip), %rbx - subq %rax, %rbx - movl %ebx, 2(%ebx) - movl %ebx, (%rsp) leaq __efi64_thunk(%rip), %rbx subq %rax, %rbx @@ -52,14 +61,92 @@ ENTRY(efi64_thunk) retq ENDPROC(efi64_thunk) - .data -efi_gdt32: - .word efi_gdt32_end - efi_gdt32 - .long 0 /* Filled out above */ - .word 0 - .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00cf9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf93000000ffff /* __KERNEL_DS */ -efi_gdt32_end: +/* + * We run this function from the 1:1 mapping. + * + * This function must be invoked with a 1:1 mapped stack. + */ +ENTRY(__efi64_thunk) + movl %ds, %eax + push %rax + movl %es, %eax + push %rax + movl %ss, %eax + push %rax + + subq $32, %rsp + movl %esi, 0x0(%rsp) + movl %edx, 0x4(%rsp) + movl %ecx, 0x8(%rsp) + movq %r8, %rsi + movl %esi, 0xc(%rsp) + movq %r9, %rsi + movl %esi, 0x10(%rsp) + + leaq 1f(%rip), %rbx + movq %rbx, func_rt_ptr(%rip) + + /* Switch to 32-bit descriptor */ + pushq $__KERNEL32_CS + leaq efi_enter32(%rip), %rax + pushq %rax + lretq + +1: addq $32, %rsp + + pop %rbx + movl %ebx, %ss + pop %rbx + movl %ebx, %es + pop %rbx + movl %ebx, %ds + /* + * Convert 32-bit status code into 64-bit. + */ + test %rax, %rax + jz 1f + movl %eax, %ecx + andl $0x0fffffff, %ecx + andl $0xf0000000, %eax + shl $32, %rax + or %rcx, %rax +1: + ret +ENDPROC(__efi64_thunk) + +ENTRY(efi_exit32) + movq func_rt_ptr(%rip), %rax + push %rax + mov %rdi, %rax + ret +ENDPROC(efi_exit32) + + .code32 +/* + * EFI service pointer must be in %edi. + * + * The stack should represent the 32-bit calling convention. + */ +ENTRY(efi_enter32) + movl $__KERNEL_DS, %eax + movl %eax, %ds + movl %eax, %es + movl %eax, %ss + + call *%edi + + /* We must preserve return value */ + movl %eax, %edi + + movl 72(%esp), %eax + pushl $__KERNEL_CS + pushl %eax + + lret +ENDPROC(efi_enter32) + + .data + .balign 8 +func_rt_ptr: .quad 0 efi_saved_sp: .quad 0 diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 1bbedc4b0f88..3005f0c89f2e 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void) intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); else { intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); - pr_info("ARCH: Uknown SoC, assuming PENWELL!\n"); + pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); } out: diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile new file mode 100644 index 000000000000..9cc57ed36022 --- /dev/null +++ b/arch/x86/platform/intel-quark/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IMR) += imr.o +obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c new file mode 100644 index 000000000000..0ee619f9fcb7 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr.c @@ -0,0 +1,661 @@ +/** + * imr.c + * + * Copyright(c) 2013 Intel Corporation. 
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> + * + * IMR registers define an isolated region of memory that can + * be masked to prohibit certain system agents from accessing memory. + * When a device behind a masked port performs an access - snooped or + * not, an IMR may optionally prevent that transaction from changing + * the state of memory or from getting correct data in response to the + * operation. + * + * Write data will be dropped and reads will return 0xFFFFFFFF, the + * system will reset and system BIOS will print out an error message to + * inform the user that an IMR has been violated. + * + * This code is based on the Linux MTRR code and reference code from + * Intel's Quark BSP EFI, Linux and grub code. + * + * See quark-x1000-datasheet.pdf for register definitions. + * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm-generic/sections.h> +#include <asm/cpu_device_id.h> +#include <asm/imr.h> +#include <asm/iosf_mbi.h> +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/types.h> + +struct imr_device { + struct dentry *file; + bool init; + struct mutex lock; + int max_imr; + int reg_base; +}; + +static struct imr_device imr_dev; + +/* + * IMR read/write mask control registers. + * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for + * bit definitions. + * + * addr_hi + * 31 Lock bit + * 30:24 Reserved + * 23:2 1 KiB aligned lo address + * 1:0 Reserved + * + * addr_hi + * 31:24 Reserved + * 23:2 1 KiB aligned hi address + * 1:0 Reserved + */ +#define IMR_LOCK BIT(31) + +struct imr_regs { + u32 addr_lo; + u32 addr_hi; + u32 rmask; + u32 wmask; +}; + +#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32)) +#define IMR_SHIFT 8 +#define imr_to_phys(x) ((x) << IMR_SHIFT) +#define phys_to_imr(x) ((x) >> IMR_SHIFT) + +/** + * imr_is_enabled - true if an IMR is enabled false otherwise. + * + * Determines if an IMR is enabled based on address range and read/write + * mask. An IMR set with an address range set to zero and a read/write + * access mask set to all is considered to be disabled. An IMR in any + * other state - for example set to zero but without read/write access + * all is considered to be enabled. This definition of disabled is how + * firmware switches off an IMR and is maintained in kernel for + * consistency. + * + * @imr: pointer to IMR descriptor. + * @return: true if IMR enabled false if disabled. + */ +static inline int imr_is_enabled(struct imr_regs *imr) +{ + return !(imr->rmask == IMR_READ_ACCESS_ALL && + imr->wmask == IMR_WRITE_ACCESS_ALL && + imr_to_phys(imr->addr_lo) == 0 && + imr_to_phys(imr->addr_hi) == 0); +} + +/** + * imr_read - read an IMR at a given index. + * + * Requires caller to hold imr mutex. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to read. + * @imr: IMR structure representing address and access masks. + * @return: 0 on success or error code passed from mbi_iosf on failure. 
+ */ +static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) +{ + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->addr_lo); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->addr_hi); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->rmask); + if (ret) + return ret; + + return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->wmask); +} + +/** + * imr_write - write an IMR at a given index. + * + * Requires caller to hold imr mutex. + * Note lock bits need to be written independently of address bits. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to write. + * @imr: IMR structure representing address and access masks. + * @lock: indicates if the IMR lock bit should be applied. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_write(struct imr_device *idev, u32 imr_id, + struct imr_regs *imr, bool lock) +{ + unsigned long flags; + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + local_irq_save(flags); + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++, + imr->addr_lo); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->addr_hi); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->rmask); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->wmask); + if (ret) + goto failed; + + /* Lock bit must be set separately to addr_lo address bits. */ + if (lock) { + imr->addr_lo |= IMR_LOCK; + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg - IMR_NUM_REGS, imr->addr_lo); + if (ret) + goto failed; + } + + local_irq_restore(flags); + return 0; +failed: + /* + * If writing to the IOSF failed then we're in an unknown state, + * likely a very bad state. An IMR in an invalid state will almost + * certainly lead to a memory access violation. + */ + local_irq_restore(flags); + WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n", + imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK); + + return ret; +} + +/** + * imr_dbgfs_state_show - print state of IMR registers. + * + * @s: pointer to seq_file for output. + * @unused: unused parameter. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_dbgfs_state_show(struct seq_file *s, void *unused) +{ + phys_addr_t base; + phys_addr_t end; + int i; + struct imr_device *idev = s->private; + struct imr_regs imr; + size_t size; + int ret = -ENODEV; + + mutex_lock(&idev->lock); + + for (i = 0; i < idev->max_imr; i++) { + + ret = imr_read(idev, i, &imr); + if (ret) + break; + + /* + * Remember to add IMR_ALIGN bytes to size to indicate the + * inherent IMR_ALIGN size bytes contained in the masked away + * lower ten bits. + */ + if (imr_is_enabled(&imr)) { + base = imr_to_phys(imr.addr_lo); + end = imr_to_phys(imr.addr_hi) + IMR_MASK; + } else { + base = 0; + end = 0; + } + size = end - base; + seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx " + "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i, + &base, &end, size, imr.rmask, imr.wmask, + imr_is_enabled(&imr) ? "enabled " : "disabled", + imr.addr_lo & IMR_LOCK ? "locked" : "unlocked"); + } + + mutex_unlock(&idev->lock); + return ret; +} + +/** + * imr_state_open - debugfs open callback. 
+ * + * @inode: pointer to struct inode. + * @file: pointer to struct file. + * @return: result of single open. + */ +static int imr_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, imr_dbgfs_state_show, inode->i_private); +} + +static const struct file_operations imr_state_ops = { + .open = imr_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * imr_debugfs_register - register debugfs hooks. + * + * @idev: pointer to imr_device structure. + * @return: 0 on success - errno on failure. + */ +static int imr_debugfs_register(struct imr_device *idev) +{ + idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL, + idev, &imr_state_ops); + return PTR_ERR_OR_ZERO(idev->file); +} + +/** + * imr_debugfs_unregister - unregister debugfs hooks. + * + * @idev: pointer to imr_device structure. + * @return: + */ +static void imr_debugfs_unregister(struct imr_device *idev) +{ + debugfs_remove(idev->file); +} + +/** + * imr_check_params - check passed address range IMR alignment and non-zero size. + * + * @base: base address of intended IMR. + * @size: size of intended IMR. + * @return: zero on valid range, -EINVAL on unaligned base/size. + */ +static int imr_check_params(phys_addr_t base, size_t size) +{ + if ((base & IMR_MASK) || (size & IMR_MASK)) { + pr_err("base %pa size 0x%08zx must align to 1KiB\n", + &base, size); + return -EINVAL; + } + if (size == 0) + return -EINVAL; + + return 0; +} + +/** + * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends. + * + * IMR addr_hi has a built-in offset of plus IMR_ALIGN (0x400) bytes from the + * value in the register. We need to subtract IMR_ALIGN bytes from input sizes + * as a result. + * + * @size: input size bytes. + * @return: reduced size. + */ +static inline size_t imr_raw_size(size_t size) +{ + return size - IMR_ALIGN; +} + +/** + * imr_address_overlap - detects an address overlap. + * + * @addr: address to check against an existing IMR. + * @imr: imr being checked. + * @return: true for overlap, false for no overlap. + */ +static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr) +{ + return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi); +} + +/** + * imr_add_range - add an Isolated Memory Region. + * + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes, must be aligned to 1 KiB. + * @rmask: read access mask. + * @wmask: write access mask. + * @lock: indicates whether or not to permanently lock this region. + * @return: zero on success or negative value indicating error. + */ +int imr_add_range(phys_addr_t base, size_t size, + unsigned int rmask, unsigned int wmask, bool lock) +{ + phys_addr_t end; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int reg; + int ret; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + ret = imr_check_params(base, size); + if (ret) + return ret; + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + /* + * Check for reserved IMR value common to firmware, kernel and grub + * indicating a disabled IMR. + */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + if (!imr_is_enabled(&imr)) + return -ENOTSUPP; + + mutex_lock(&idev->lock); + + /* + * Find a free IMR while checking for an existing overlapping range.
+ * Note there's no restriction in silicon to prevent IMR overlaps. + * For the sake of simplicity and ease in defining/debugging an IMR + * memory map we exclude IMR overlaps. + */ + reg = -1; + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + /* Find overlap @ base or end of requested range. */ + ret = -EINVAL; + if (imr_is_enabled(&imr)) { + if (imr_address_overlap(base, &imr)) + goto failed; + if (imr_address_overlap(end, &imr)) + goto failed; + } else { + reg = i; + } + } + + /* Error out if we have no free IMR entries. */ + if (reg == -1) { + ret = -ENOMEM; + goto failed; + } + + pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n", + reg, &base, &end, raw_size, rmask, wmask); + + /* Enable IMR at specified range and access mask. */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + + ret = imr_write(idev, reg, &imr, lock); + if (ret < 0) { + /* + * In the highly unlikely event iosf_mbi_write failed, + * attempt to roll back the IMR setup, skipping the trapping + * of further IOSF write failures. + */ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + imr_write(idev, reg, &imr, false); + } +failed: + mutex_unlock(&idev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(imr_add_range); + +/** + * __imr_remove_range - delete an Isolated Memory Region. + * + * This function allows you to delete an IMR by its index specified by reg or + * by address range specified by base and size respectively. If you specify an + * index on its own, the base and size parameters are ignored. + * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored. + * __imr_remove_range(-1, base, size); delete IMR from base to base+size. + * + * @reg: imr index to remove. + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. + * @return: -EINVAL on invalid range or out of range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +static int __imr_remove_range(int reg, phys_addr_t base, size_t size) +{ + phys_addr_t end; + bool found = false; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int ret = 0; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + /* + * Validate address range if deleting by address, else we are + * deleting by index where base and size will be ignored. + */ + if (reg == -1) { + ret = imr_check_params(base, size); + if (ret) + return ret; + } + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + mutex_lock(&idev->lock); + + if (reg >= 0) { + /* If a specific IMR is given try to use it. */ + ret = imr_read(idev, reg, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) { + ret = -ENODEV; + goto failed; + } + found = true; + } else { + /* Search for match based on address range. */ + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) + continue; + + if ((imr_to_phys(imr.addr_lo) == base) && + (imr_to_phys(imr.addr_hi) == end)) { + found = true; + reg = i; + break; + } + } + } + + if (!found) { + ret = -ENODEV; + goto failed; + } + + pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size); + + /* Tear down the IMR.
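+ * Disabling an IMR means restoring the firmware default: a zero address + * range with read and write access for all agents, which imr_is_enabled() + * reports as off.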
*/ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + + ret = imr_write(idev, reg, &imr, false); + +failed: + mutex_unlock(&idev->lock); + return ret; +} + +/** + * imr_remove_range - delete an Isolated Memory Region by address + * + * This function allows you to delete an IMR by an address range specified + * by base and size respectively. + * imr_remove_range(base, size); delete IMR from base to base+size. + * + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. + * @return: -EINVAL on invalid range or out of range id + * -ENODEV if no IMR matching the range exists or it is locked + * 0 on success. + */ +int imr_remove_range(phys_addr_t base, size_t size) +{ + return __imr_remove_range(-1, base, size); +} +EXPORT_SYMBOL_GPL(imr_remove_range); + +/** + * imr_clear - delete an Isolated Memory Region by index + * + * This function allows you to delete an IMR by its index. Useful for + * initial sanitization of the IMR address map. + * imr_clear(reg); delete IMR at index reg. + * + * @reg: imr index to remove. + * @return: -EINVAL on invalid range or out of range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +static inline int imr_clear(int reg) +{ + return __imr_remove_range(reg, 0, 0); +} + +/** + * imr_fixup_memmap - Tear down IMRs used during bootup. + * + * BIOS and Grub both set up IMRs around the compressed kernel and initrd memory + * that need to be removed before the kernel hands out one of the IMR + * encased addresses to a downstream DMA agent such as the SD or Ethernet. + * IMRs on Galileo are set up to immediately reset the system on violation. + * As a result, if you're running a root filesystem from SD you'll need + * the boot-time IMRs torn down or you'll find seemingly random resets when + * using your filesystem. + * + * @idev: pointer to imr_device structure. + * @return: + */ +static void __init imr_fixup_memmap(struct imr_device *idev) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + int i; + int ret; + + /* Tear down all existing unlocked IMRs. */ + for (i = 0; i < idev->max_imr; i++) + imr_clear(i); + + /* + * Set up a locked IMR around the physical extent of the kernel + * from the beginning of the .text section to the end of the + * .rodata section as one physically contiguous block. + */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); + if (ret < 0) { + pr_err("unable to setup IMR for kernel: (%p - %p)\n", + &_text, &__end_rodata); + } else { + pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n", + size / 1024, &_text, &__end_rodata); + } + +} + +static const struct x86_cpu_id imr_ids[] __initconst = { + { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000. */ + {} +}; +MODULE_DEVICE_TABLE(x86cpu, imr_ids); + +/** + * imr_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support, 0 if good to go.
+ */ +static int __init imr_init(void) +{ + struct imr_device *idev = &imr_dev; + int ret; + + if (!x86_match_cpu(imr_ids) || !iosf_mbi_available()) + return -ENODEV; + + idev->max_imr = QUARK_X1000_IMR_MAX; + idev->reg_base = QUARK_X1000_IMR_REGBASE; + idev->init = true; + + mutex_init(&idev->lock); + ret = imr_debugfs_register(idev); + if (ret != 0) + pr_warn("debugfs register failed!\n"); + imr_fixup_memmap(idev); + return 0; +} + +/** + * imr_exit - exit point for IMR code. + * + * Deregisters debugfs, leaves IMR state as-is. + * + * return: + */ +static void __exit imr_exit(void) +{ + imr_debugfs_unregister(&imr_dev); +} + +module_init(imr_init); +module_exit(imr_exit); + +MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>"); +MODULE_DESCRIPTION("Intel Isolated Memory Region driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c new file mode 100644 index 000000000000..c9a0838890e2 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -0,0 +1,129 @@ +/** + * imr_selftest.c + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> + * + * IMR self test. The purpose of this module is to run a set of tests on the + * IMR API to validate its sanity. We check for overlapping, reserved + * addresses and setup/teardown sanity. + * + */ + +#include <asm-generic/sections.h> +#include <asm/imr.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/types.h> + +#define SELFTEST KBUILD_MODNAME ": " +/** + * imr_self_test_result - Print result string for self test. + * + * @res: result code - true if test passed, false otherwise. + * @fmt: format string. + * ... variadic argument list. + */ +static void __init imr_self_test_result(int res, const char *fmt, ...) +{ + va_list vlist; + + /* Print pass/fail. */ + if (res) + pr_info(SELFTEST "pass "); + else + pr_info(SELFTEST "fail "); + + /* Print variable string. */ + va_start(vlist, fmt); + vprintk(fmt, vlist); + va_end(vlist); + + /* Optional warning. */ + WARN(res == 0, "test failed"); +} +#undef SELFTEST + +/** + * imr_self_test + * + * Verify the IMR API with some simple tests covering overlap, + * zero sized allocations and 1 KiB sized areas. + * + */ +static void __init imr_self_test(void) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n"; + int ret; + + /* Test zero zero. */ + ret = imr_add_range(0, 0, 0, 0, false); + imr_self_test_result(ret < 0, "zero sized IMR\n"); + + /* Test exact overlap. */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with base inside of existing. */ + base += size - IMR_ALIGN; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with end inside of existing. */ + base -= size + IMR_ALIGN * 2; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test that a 1 KiB IMR @ zero with read/write all will bomb out.
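+ * (A zero-based range with access-all masks matches the firmware "disabled" + * pattern, so imr_add_range() rejects it with -ENOTSUPP.)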
*/ + ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL, + IMR_WRITE_ACCESS_ALL, false); + imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n"); + + /* Test that a 1 KiB IMR @ zero with CPU only will work. */ + ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n"); + if (ret >= 0) { + ret = imr_remove_range(0, IMR_ALIGN); + imr_self_test_result(ret == 0, "teardown - cpu-access\n"); + } + + /* Test 2 KiB works. */ + size = IMR_ALIGN * 2; + ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, + IMR_WRITE_ACCESS_ALL, false); + imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n"); + if (ret >= 0) { + ret = imr_remove_range(0, size); + imr_self_test_result(ret == 0, "teardown 2KiB\n"); + } +} + +/** + * imr_self_test_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support 0 if good to go. + */ +static int __init imr_self_test_init(void) +{ + imr_self_test(); + return 0; +} + +/** + * imr_self_test_exit - exit point for IMR code. + * + * return: + */ +static void __exit imr_self_test_exit(void) +{ +} + +module_init(imr_self_test_init); +module_exit(imr_self_test_exit); + +MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>"); +MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bd8b8459c3d0..5240f563076d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val) BUG_ON(val); } #endif + +static u64 xen_read_msr_safe(unsigned int msr, int *err) +{ + u64 val; + + val = native_read_msr_safe(msr, err); + switch (msr) { + case MSR_IA32_APICBASE: +#ifdef CONFIG_X86_X2APIC + if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) +#endif + val &= ~X2APIC_ENABLE; + break; + } + return val; +} + static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .wbinvd = native_wbinvd, - .read_msr = native_read_msr_safe, + .read_msr = xen_read_msr_safe, .write_msr = xen_write_msr_safe, .read_tsc = native_read_tsc, @@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void) #ifdef CONFIG_X86_32 i386_start_kernel(); #else + cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ x86_64_start_reservations((char *)__pa_symbol(&boot_params)); #endif } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 23b45eb9a89c..956374c1edbc 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -41,7 +41,7 @@ static u8 zero_stats; static inline void check_zero(void) { u8 ret; - u8 old = ACCESS_ONCE(zero_stats); + u8 old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); int cpu = smp_processor_id(); u64 start; + __ticket_t head; unsigned long flags; /* If kicker interrupts not initialized yet, just spin */ @@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) */ __ticket_enter_slowpath(lock); + /* make sure enter_slowpath, which is atomic does not cross the read */ + smp_mb__after_atomic(); + /* * check again make sure it didn't become free while * 
we weren't looking */ - if (ACCESS_ONCE(lock->tickets.head) == want) { + head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(head, want)) { add_stats(TAKEN_SLOW_PICKUP, 1); goto out; } @@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next) const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); /* Make sure we read lock before want */ - if (ACCESS_ONCE(w->lock) == lock && - ACCESS_ONCE(w->want) == next) { + if (READ_ONCE(w->lock) == lock && + READ_ONCE(w->want) == next) { add_stats(RELEASED_SLOW_KICKED, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); break; diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 876eb380aa26..147b26ed9c91 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -182,13 +182,13 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) \ +#define __user_ok(addr, size) \ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) -#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) -#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) /* * These are the main single-value transfer routines. They @@ -204,8 +204,8 @@ * (a) re-use the arguments for side effects (sizeof is ok) * (b) require any knowledge of processes at this stage */ -#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) -#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) /* * The "__xxx" versions of the user access functions are versions that @@ -213,39 +213,39 @@ * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). 
*/ -#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) extern long __put_user_bad(void); -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ }) -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ - case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ - case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ + case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \ + case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \ + case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \ case 8: { \ __typeof__(*ptr) __v64 = x; \ - retval = __copy_to_user(ptr,&__v64,8); \ + retval = __copy_to_user(ptr, &__v64, 8); \ break; \ } \ default: __put_user_bad(); \ @@ -316,35 +316,35 @@ __asm__ __volatile__( \ :"=r" (err), "=r" (cb) \ :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err, __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ - if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ - case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ - case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ - case 8: retval = __copy_from_user(&x,ptr,8); break; \ + case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\ + case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\ + case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\ + case 8: retval = __copy_from_user(&x, ptr, 8); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) @@ -390,19 +390,19 @@ __asm__ __volatile__( \ */ extern unsigned __xtensa_copy_user(void *to, const void *from, 
unsigned n); -#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size) +#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size) static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) { - return __copy_user(to,from,n); + return __copy_user(to, from, n); } static inline unsigned long __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) { - return __copy_user(to,from,n); + return __copy_user(to, from, n); } static inline unsigned long @@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) { prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to,from,n); + return __copy_user(to, from, n); return n; } @@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) { prefetchw(to); if (access_ok(VERIFY_READ, from, n)) - return __copy_user(to,from,n); + return __copy_user(to, from, n); else memset(to, 0, n); return n; } -#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) -#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) -#define __copy_to_user(to,from,n) \ - __generic_copy_to_user_nocheck((to),(from),(n)) -#define __copy_from_user(to,from,n) \ - __generic_copy_from_user_nocheck((to),(from),(n)) +#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) +#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user |
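For illustration only - not part of the patch above - here is a minimal sketch of how a caller might use the imr_add_range()/imr_remove_range() API added in arch/x86/platform/intel-quark/imr.c. The function and buffer below are hypothetical; the sketch assumes a page-sized, page-aligned buffer (and hence 1 KiB aligned, as the API requires) that only the CPU should be allowed to touch.

#include <asm/imr.h>
#include <asm/io.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical helper: carve out a CPU-only page using the IMR API. */
static int example_protect_page(void)
{
	unsigned long buf;
	phys_addr_t base;
	int ret;

	buf = __get_free_page(GFP_KERNEL);	/* page aligned => 1 KiB aligned */
	if (!buf)
		return -ENOMEM;

	base = virt_to_phys((void *)buf);

	/* Read/write for the CPU only; not locked, so it can be removed later. */
	ret = imr_add_range(base, PAGE_SIZE, IMR_CPU, IMR_CPU, false);
	if (ret) {
		free_page(buf);
		return ret;
	}

	/* ... use the protected buffer ... */

	ret = imr_remove_range(base, PAGE_SIZE);
	free_page(buf);
	return ret;
}

Note that imr_remove_range() matches on the exact base and size used at setup time, so teardown must pass the same values that were handed to imr_add_range().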