Diffstat (limited to 'arch/powerpc')
119 files changed, 612 insertions, 406 deletions
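Before the per-file hunks: one of the behavioural changes below, in arch/powerpc/kernel/perf_event.c, introduces check_and_compute_delta() so that a POWER7 PMC value that has rolled back by fewer than 256 events is treated as "no progress" rather than wrapping into a huge bogus delta after the 32-bit masking. The following standalone sketch (a hedged illustration with hypothetical sample values, not kernel code; the kernel version uses u64/local64 types) reproduces that guard as it appears in the hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* Guard taken from the perf_event.c hunk below: a counter that moved
     * backwards by fewer than 256 events is considered a rollback and
     * contributes no delta, instead of masking down to a near-2^32 value.
     */
    static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
    {
            uint64_t delta = (val - prev) & 0xfffffffful;

            if (prev > val && (prev - val) < 256)
                    delta = 0;

            return delta;
    }

    int main(void)
    {
            /* Normal forward progress: delta is just the difference. */
            printf("%llu\n", (unsigned long long)
                   check_and_compute_delta(1000, 1500));              /* 500 */

            /* 32-bit counter wrap: the mask still yields the true delta. */
            printf("%llu\n", (unsigned long long)
                   check_and_compute_delta(0xfffffff0ul, 0x10));      /* 32 */

            /* POWER7-style rollback by < 256: treated as no progress. */
            printf("%llu\n", (unsigned long long)
                   check_and_compute_delta(1500, 1400));              /* 0 */

            return 0;
    }

The same helper is reused in power_pmu_read(), freeze_limited_counters(), thaw_limited_counters() and record_and_restart() in the hunks that follow.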
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d0e8a1dbf822..8f4d50b0adfa 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -138,7 +138,6 @@ config PPC select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ select IRQ_PER_CPU - select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL @@ -210,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE config ARCH_SUSPEND_POSSIBLE def_bool y depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ - PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x + (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x config PPC_DCR_NATIVE bool diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts index 22f64b62d7f6..e0668f877794 100644 --- a/arch/powerpc/boot/dts/p1020rdb.dts +++ b/arch/powerpc/boot/dts/p1020rdb.dts @@ -1,7 +1,7 @@ /* * P1020 RDB Device Tree Source * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -553,7 +553,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <16 2>; @@ -580,8 +580,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <16 2>; @@ -590,8 +590,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts index da4cb0d8d215..e2d48fd4416e 100644 --- a/arch/powerpc/boot/dts/p2020rdb.dts +++ b/arch/powerpc/boot/dts/p2020rdb.dts @@ -1,7 +1,7 @@ /* * P2020 RDB Device Tree Source * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -537,7 +537,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <25 2>; @@ -564,8 +564,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <26 2>; @@ -574,8 +574,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts index 0fe93d0c8b2e..b69c3a5dc858 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts @@ -6,7 +6,7 @@ * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb, * eth1, eth2, sdhc, crypto, global-util, pci0. * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -342,7 +342,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <25 2>; diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts index e95a51285328..7a31d46c01b0 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts @@ -7,7 +7,7 @@ * * Please note to add "-b 1" for core1's dts compiling. * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -162,8 +162,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <26 2>; @@ -172,8 +172,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index 6cf9d6614805..abf74dc1f79c 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig @@ -47,6 +47,7 @@ CONFIG_MTD_NAND_NDFC=y CONFIG_MTD_UBI=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_RAM=y +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT24=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index 6828eda02bdc..0c7de9620ea6 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig @@ -43,6 +43,7 @@ CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_SCSI_TGT=y CONFIG_BLK_DEV_SD=y diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig index 4b2441244eab..d41857a5152d 100644 --- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig +++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig @@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_DS1682=y CONFIG_IDE=y CONFIG_BLK_DEV_IDECS=y diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig index a360ba44b928..38303ec11bcd 100644 --- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig @@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_DS1682=y CONFIG_IDE=y CONFIG_BLK_DEV_IDECS=y diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index be2829dd129f..98533973d20f 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -138,6 +138,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_DS1682=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig index 0c9c7ed7ec75..b614508d6fd2 100644 --- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig +++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig @@ -63,6 +63,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/powerpc/configs/e55xx_smp_defconfig 
b/arch/powerpc/configs/e55xx_smp_defconfig index 06f95492afc7..9fa1613e5e2b 100644 --- a/arch/powerpc/configs/e55xx_smp_defconfig +++ b/arch/powerpc/configs/e55xx_smp_defconfig @@ -32,6 +32,7 @@ CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index f39d0cf876dd..8a874b999867 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ b/arch/powerpc/configs/linkstation_defconfig @@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index 62db8a3df162..c02bbb2fddf8 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -61,6 +61,7 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=1 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_XIP=y +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT24=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 7376e27b8ed4..e63f537b854a 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -52,6 +52,7 @@ CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT24=y CONFIG_SCSI_TGT=y CONFIG_BLK_DEV_SD=y diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index 99a19d1e9bf8..c06a86c33098 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -82,6 +82,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index c636f23f8c92..942ced90557c 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -84,6 +84,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig index 55b54318fef6..038a308cbfc4 100644 --- a/arch/powerpc/configs/mpc86xx_defconfig +++ b/arch/powerpc/configs/mpc86xx_defconfig @@ -66,6 +66,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index edd2d54c8196..f4deb0b78cf0 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -59,6 +59,7 @@ CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 9d64a6822d86..0a10fb009ef7 100644 --- 
a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -398,6 +398,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_CDROM_PKTCDVD=m CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_HD=y +CONFIG_MISC_DEVICES=y CONFIG_ENCLOSURE_SERVICES=m CONFIG_SENSORS_TSL2550=m CONFIG_EEPROM_AT24=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 9c3f22c6cde1..249ddd0a27cd 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -189,6 +189,7 @@ CONFIG_TIGON3=y CONFIG_BNX2=m CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m CONFIG_EHEA=y CONFIG_IXGBE=m CONFIG_IXGB=m @@ -255,6 +256,8 @@ CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_EHCA=m +CONFIG_INFINIBAND_CXGB3=m +CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 2e561876fc89..f18c6d9b9510 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -209,8 +209,8 @@ static __inline__ unsigned long ffz(unsigned long x) return BITS_PER_LONG; /* - * Calculate the bit position of the least signficant '1' bit in x - * (since x has been changed this will actually be the least signficant + * Calculate the bit position of the least significant '1' bit in x + * (since x has been changed this will actually be the least significant * '0' bit in * the original x). Note: (x & -x) gives us a mask that * is the least significant * (RIGHT-most) 1-bit of the value in x. */ diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 2296112e247b..91010e8f8479 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -140,7 +140,7 @@ static inline void __user *arch_compat_alloc_user_space(long len) unsigned long usp = regs->gpr[1]; /* - * We cant access below the stack pointer in the 32bit ABI and + * We can't access below the stack pointer in the 32bit ABI and * can access 288 bytes in the 64bit ABI */ if (!is_32bit_task()) diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h index e50323fe941f..4398a6cdcf53 100644 --- a/arch/powerpc/include/asm/cpm.h +++ b/arch/powerpc/include/asm/cpm.h @@ -98,7 +98,7 @@ typedef struct cpm_buf_desc { #define BD_SC_INTRPT (0x1000) /* Interrupt on change */ #define BD_SC_LAST (0x0800) /* Last buffer in frame */ #define BD_SC_TC (0x0400) /* Transmit CRC */ -#define BD_SC_CM (0x0200) /* Continous mode */ +#define BD_SC_CM (0x0200) /* Continuous mode */ #define BD_SC_ID (0x0100) /* Rec'd too many idles */ #define BD_SC_P (0x0100) /* xmt preamble */ #define BD_SC_BR (0x0020) /* Break received */ diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index bd07650dca56..8ee4211ca0c6 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -4,7 +4,7 @@ * * This file contains structures and information for the communication * processor channels. Some CPM control and status is available - * throught the MPC8xx internal memory map. See immap.h for details. + * through the MPC8xx internal memory map. See immap.h for details. * This file only contains what I need for the moment, not the total * CPM capabilities. I (or someone else) will add definitions as they * are needed. 
-- Dan diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index be3cdf9134ce..1833d1a07e79 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -382,10 +382,12 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL) +#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ @@ -435,11 +437,15 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500) +#else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) +#endif #else enum { CPU_FTRS_POSSIBLE = @@ -473,16 +479,21 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | + CPU_FTRS_E5500 | #endif 0, }; #endif /* __powerpc64__ */ #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500) +#else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) +#endif #else enum { CPU_FTRS_ALWAYS = @@ -513,6 +524,7 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & + CPU_FTRS_E5500 & #endif CPU_FTRS_POSSIBLE, }; diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 6d2416a85709..dd70fac57ec8 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr); extern void __dma_sync(void *vaddr, size_t size, int direction); extern void __dma_sync_page(struct page *page, unsigned long offset, size_t size, int direction); +extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); #else /* ! 
CONFIG_NOT_COHERENT_CACHE */ /* @@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +extern int dma_mmap_coherent(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t); +#define ARCH_HAS_DMA_MMAP_COHERENT + + static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index ec089acfa56b..8edec710cc6d 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -122,7 +122,7 @@ #define H_DABRX_KERNEL (1UL<<(63-62)) #define H_DABRX_USER (1UL<<(63-63)) -/* Each control block has to be on a 4K bondary */ +/* Each control block has to be on a 4K boundary */ #define H_CB_ALIGNMENT 4096 /* pSeries hypervisor opcodes */ diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index d0e7701fa1f6..be0171afdc0f 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -50,7 +50,7 @@ typedef unsigned int kprobe_opcode_t; * Handle cases where: * - User passes a <.symbol> or <module:.symbol> * - User passes a <symbol> or <module:symbol> - * - User passes a non-existant symbol, kallsyms_lookup_name + * - User passes a non-existent symbol, kallsyms_lookup_name * returns 0. Don't deref the NULL pointer in that case */ #define kprobe_lookup_name(name, addr) \ diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 26b8c807f8f1..a077adc0b35e 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -105,7 +105,7 @@ struct lppaca { // processing of external interrupts. Note that PLIC will store the // XIRR directly into the xXirrValue field so that another XIRR will // not be presented until this one clears. The layout of the low - // 4-bytes of this Dword is upto SLIC - PLIC just checks whether the + // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the // entire Dword is zero or not. A non-zero value in the low order // 2-bytes will result in SLIC being granted the highest thread // priority upon return. A 0 will return to SLIC as medium priority. 
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index fe56a23e1ff0..e4f01915fbb0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -35,9 +35,9 @@ struct smp_ops_t { int (*probe)(void); void (*kick_cpu)(int nr); void (*setup_cpu)(int nr); + void (*bringup_done)(void); void (*take_timebase)(void); void (*give_timebase)(void); - int (*cpu_enable)(unsigned int nr); int (*cpu_disable)(void); void (*cpu_die)(unsigned int nr); int (*cpu_bootable)(unsigned int nr); @@ -267,7 +267,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); -extern void power4_cpu_offline_powersave(void); extern void ppc6xx_idle(void); extern void book3e_idle(void); diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index acac35d5b382..ae7b3efec8e5 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -27,7 +27,7 @@ #define STE_VSID_SHIFT 12 /* Location of cpu0's segment table */ -#define STAB0_PAGE 0x6 +#define STAB0_PAGE 0x8 #define STAB0_OFFSET (STAB0_PAGE << 12) #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index da4b20008541..2cd664ef0a5e 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -100,7 +100,7 @@ extern phys_addr_t kernstart_addr; #endif #ifdef CONFIG_FLATMEM -#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) +#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT)) #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) #endif diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 932f88dcf6fa..812b2cd80aed 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -169,7 +169,7 @@ do { \ /* * This is the default if a program doesn't have a PT_GNU_STACK * program header entry. The PPC64 ELF ABI has a non executable stack - * stack by default, so in the absense of a PT_GNU_STACK program header + * stack by default, so in the absence of a PT_GNU_STACK program header * we turn execute permission off. 
*/ #define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h index 19fd7933e2d9..eafa5a5f56de 100644 --- a/arch/powerpc/include/asm/pasemi_dma.h +++ b/arch/powerpc/include/asm/pasemi_dma.h @@ -522,7 +522,7 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, dma_addr_t *handle); -/* Routines to allocate flags (events) for channel syncronization */ +/* Routines to allocate flags (events) for channel synchronization */ extern int pasemi_dma_alloc_flag(void); extern void pasemi_dma_free_flag(int flag); extern void pasemi_dma_set_flag(int flag); diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 5e156e034fe2..b90dbf8e5cd9 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -106,7 +106,7 @@ struct pci_controller { * Used for variants of PCI indirect handling and possible quirks: * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 * EXT_REG - provides access to PCI-e extended registers - * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS + * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS * to determine which bus number to match on when generating type0 * config cycles diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h index 00eedc5a4e61..10902c9375d0 100644 --- a/arch/powerpc/include/asm/pmac_feature.h +++ b/arch/powerpc/include/asm/pmac_feature.h @@ -53,8 +53,8 @@ /* Here is the infamous serie of OHare based machines */ -#define PMAC_TYPE_COMET 0x20 /* Beleived to be PowerBook 2400 */ -#define PMAC_TYPE_HOOPER 0x21 /* Beleived to be PowerBook 3400 */ +#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */ +#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */ #define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */ #define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */ #define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */ diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 76bb195e4f24..8d1569c29042 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -86,7 +86,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) #endif -/* _PAGE_CHG_MASK masks of bits that are to be preserved accross +/* _PAGE_CHG_MASK masks of bits that are to be preserved across * pgprot changes */ #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ @@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); * on platforms where such control is possible. */ #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) #define PAGE_KERNEL_TEXT PAGE_KERNEL_X #else #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX @@ -174,7 +174,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); /* * Don't just check for any non zero bits in __PAGE_USER, since for book3e * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in - * _PAGE_USER. Need to explictly match _PAGE_BAP_UR bit in that case too. + * _PAGE_USER. 
Need to explicitly match _PAGE_BAP_UR bit in that case too. */ #define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER) diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index 9e2cb2019161..f706164b0bd0 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h @@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); static inline void qe_ic_cascade_low_ipic(unsigned int irq, struct irq_desc *desc) { - struct qe_ic *qe_ic = get_irq_desc_data(desc); + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); if (cascade_irq != NO_IRQ) @@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq, static inline void qe_ic_cascade_high_ipic(unsigned int irq, struct irq_desc *desc) { - struct qe_ic *qe_ic = get_irq_desc_data(desc); + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); if (cascade_irq != NO_IRQ) @@ -101,9 +101,9 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq, static inline void qe_ic_cascade_low_mpic(unsigned int irq, struct irq_desc *desc) { - struct qe_ic *qe_ic = get_irq_desc_data(desc); + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); - struct irq_chip *chip = get_irq_desc_chip(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); @@ -114,9 +114,9 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq, static inline void qe_ic_cascade_high_mpic(unsigned int irq, struct irq_desc *desc) { - struct qe_ic *qe_ic = get_irq_desc_data(desc); + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); - struct irq_chip *chip = get_irq_desc_chip(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); @@ -127,9 +127,9 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq, static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, struct irq_desc *desc) { - struct qe_ic *qe_ic = get_irq_desc_data(desc); + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); unsigned int cascade_irq; - struct irq_chip *chip = get_irq_desc_chip(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); cascade_irq = qe_ic_get_high_irq(qe_ic); if (cascade_irq == NO_IRQ) diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 86ad8128963a..b316794aa2b5 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -2,7 +2,7 @@ * Contains register definitions common to the Book E PowerPC * specification. Notice that while the IBM-40x series of CPUs * are not true Book E PowerPCs, they borrowed a number of features - * before Book E was finalized, and are included here as well. Unfortunatly, + * before Book E was finalized, and are included here as well. Unfortunately, * they sometimes used different locations than true Book E CPUs did. 
* * This program is free software; you can redistribute it and/or @@ -110,7 +110,7 @@ #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ #define SPRN_MAS3 0x273 /* MMU Assist Register 3 */ #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ -#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */ +#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */ #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ #define SPRN_PID1 0x279 /* Process ID Register 1 */ #define SPRN_PID2 0x27A /* Process ID Register 2 */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 66e237bbe15f..a902a0d3ae0d 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -36,15 +36,16 @@ extern void cpu_die(void); extern void smp_send_debugger_break(int cpu); extern void smp_message_recv(int); +extern void start_secondary_resume(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); #ifdef CONFIG_HOTPLUG_CPU -extern void fixup_irqs(const struct cpumask *map); +extern void migrate_irqs(void); int generic_cpu_disable(void); -int generic_cpu_enable(unsigned int cpu); void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); +void generic_set_cpu_dead(unsigned int cpu); #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h index 25020a34ce7f..d8f5c60f61c1 100644 --- a/arch/powerpc/include/asm/spu_priv1.h +++ b/arch/powerpc/include/asm/spu_priv1.h @@ -223,7 +223,7 @@ spu_disable_spu (struct spu_context *ctx) } /* - * The declarations folowing are put here for convenience + * The declarations following are put here for convenience * and only intended to be used by the platform setup code. */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index aa0f1ebb4aaf..60f64b132bd4 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -348,3 +348,7 @@ COMPAT_SYS_SPU(sendmsg) COMPAT_SYS_SPU(recvmsg) COMPAT_SYS_SPU(recvmmsg) SYSCALL_SPU(accept4) +SYSCALL_SPU(name_to_handle_at) +COMPAT_SYS_SPU(open_by_handle_at) +COMPAT_SYS_SPU(clock_adjtime) +SYSCALL_SPU(syncfs) diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h index f737732c3861..ae9c899c8a6d 100644 --- a/arch/powerpc/include/asm/uninorth.h +++ b/arch/powerpc/include/asm/uninorth.h @@ -60,7 +60,7 @@ * * Obviously, the GART is not cache coherent and so any change to it * must be flushed to memory (or maybe just make the GART space non - * cachable). AGP memory itself doens't seem to be cache coherent neither. + * cachable). AGP memory itself does't seem to be cache coherent neither. 
* * In order to invalidate the GART (which is probably necessary to inval * the bridge internal TLBs), the following sequence has to be written, diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 6151937657f6..3c215648ce6d 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -367,10 +367,14 @@ #define __NR_recvmsg 342 #define __NR_recvmmsg 343 #define __NR_accept4 344 +#define __NR_name_to_handle_at 345 +#define __NR_open_by_handle_at 346 +#define __NR_clock_adjtime 347 +#define __NR_syncfs 348 #ifdef __KERNEL__ -#define __NR_syscalls 345 +#define __NR_syscalls 349 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index 25e39220e89c..b73a8199f161 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -57,7 +57,7 @@ struct vdso_data { } version; /* Note about the platform flags: it now only contains the lpar - * bit. The actual platform number is dead and burried + * bit. The actual platform number is dead and buried */ __u32 platform; /* Platform flags 0x18 */ __u32 processor; /* Processor type 0x1C */ diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 625942ae5585..60b3e377b1e4 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void) /* This function can be used to enable the early boot text when doing * OF booting or within bootx init. It must be followed by a btext_unmap() - * call before the logical address becomes unuseable + * call before the logical address becomes unusable */ void __init btext_setup_display(int width, int height, int depth, int pitch, unsigned long address) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c9b68d07ac4f..b9602ee06deb 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, .cpu_name = "e5500", - .cpu_features = CPU_FTRS_E500MC, + .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d569e2aff18..5b5e1f002a8e 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -188,7 +188,7 @@ static void crash_kexec_wait_realmode(int cpu) } mb(); } -#endif +#endif /* CONFIG_PPC_STD_MMU_64 */ /* * This function will be called by secondary cpus or by kexec cpu @@ -233,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs) crash_ipi_callback(regs); } -#else +#else /* ! CONFIG_SMP */ +static inline void crash_kexec_wait_realmode(int cpu) {} + static void crash_kexec_prepare_cpus(int cpu) { /* @@ -253,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs) { cpus_in_sr = CPU_MASK_NONE; } -#endif +#endif /* CONFIG_SMP */ /* * Register a function to be called on shutdown. 
Only use this if you @@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); cpu_set(crashing_cpu, cpus_in_crash); -#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) crash_kexec_wait_realmode(crashing_cpu); -#endif machine_kexec_mask_interrupts(); diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index cf02cad62d9a..d238c082c3c5 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -179,3 +179,21 @@ static int __init dma_init(void) return 0; } fs_initcall(dma_init); + +int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + unsigned long pfn; + +#ifdef CONFIG_NOT_COHERENT_CACHE + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); +#else + pfn = page_to_pfn(virt_to_page(cpu_addr)); +#endif + return remap_pfn_range(vma, vma->vm_start, + pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} +EXPORT_SYMBOL_GPL(dma_mmap_coherent); diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5c43063d2506..9651acc3504a 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -379,7 +379,7 @@ interrupt_end_book3e: mfspr r13,SPRN_SPRG_PACA /* get our PACA */ b system_call_common -/* Auxillary Processor Unavailable Interrupt */ +/* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 8a817995b4cd..aeb739e18769 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -5,7 +5,7 @@ * handling and other fixed offset specific things. * * This file is meant to be #included from head_64.S due to - * position dependant assembly. + * position dependent assembly. * * Most of this originates from head_64.S and thus has the same * copyright history. @@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted) rfid b . /* prevent speculative execution */ -/* - * Space for CPU0's segment table. - * - * On iSeries, the hypervisor must fill in at least one entry before - * we get control (with relocate on). The address is given to the hv - * as a page number (see xLparMap below), so this must be at a - * fixed address (the linker can't compute (u64)&initial_stab >> - * PAGE_SHIFT). - */ - . = STAB0_OFFSET /* 0x6000 */ - .globl initial_stab -initial_stab: - .space 4096 - #ifdef CONFIG_PPC_PSERIES /* * Data area reserved for FWNMI option. @@ -1027,3 +1013,17 @@ xLparMap: #ifdef CONFIG_PPC_PSERIES . = 0x8000 #endif /* CONFIG_PPC_PSERIES */ + +/* + * Space for CPU0's segment table. + * + * On iSeries, the hypervisor must fill in at least one entry before + * we get control (with relocate on). The address is given to the hv + * as a page number (see xLparMap above), so this must be at a + * fixed address (the linker can't compute (u64)&initial_stab >> + * PAGE_SHIFT). + */ + . 
= STAB0_OFFSET /* 0x8000 */ + .globl initial_stab +initial_stab: + .space 4096 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 98c4b29a56f4..c5c24beb8387 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -890,6 +890,15 @@ __secondary_start: mtspr SPRN_SRR1,r4 SYNC RFI + +_GLOBAL(start_secondary_resume) + /* Reset stack */ + rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl start_secondary + b . #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 9dd21a8c4d52..a91626d87fc9 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -766,7 +766,7 @@ DataAccess: * miss get to this point to load the TLB. * r10 - TLB_TAG value * r11 - Linux PTE - * r12, r9 - avilable to use + * r12, r9 - available to use * PID - loaded with proper value when we get here * Upon exit, we reload everything and RFI. * Actually, it will fit now, but oh well.....a common place diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index cbb3436b592d..5e12b741ba5f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -178,7 +178,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 782f23df7c85..3a319f9c9d3e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -40,7 +40,7 @@ #include <asm/kvm_book3s_asm.h> #include <asm/ptrace.h> -/* The physical memory is layed out such that the secondary processor +/* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow * using the layout described in exceptions-64s.S */ @@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start) add r13,r13,r4 /* for this processor. */ mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ + /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + /* Create a temp kernel stack for use before relocation is on. 
*/ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 3e02710d9562..5ecf54cfa7d4 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -326,7 +326,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index c00d4ca1ee15..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int ibmebus_bus_pm_freeze(struct device *dev) { @@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define ibmebus_bus_pm_freeze NULL #define ibmebus_bus_pm_thaw NULL @@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) #define ibmebus_bus_pm_poweroff_noirq NULL #define ibmebus_bus_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { .prepare = ibmebus_bus_pm_prepare, diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 5328709eeedc..ba3195478600 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) isync b 1b -_GLOBAL(power4_cpu_offline_powersave) - /* Go to NAP now */ - mfmsr r7 - rldicl r0,r7,48,1 - rotldi r0,r0,16 - mtmsrd r0,1 /* hard-disable interrupts */ - li r0,1 - li r6,0 - stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ - stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE - oris r7,r7,MSR_POW@h - sync - isync - mtmsrd r7 - isync - blr diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 63625e0650b5..f621b7d2d869 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU -void fixup_irqs(const struct cpumask *map) +void migrate_irqs(void) { struct irq_desc *desc; unsigned int irq; static int warned; cpumask_var_t mask; + const struct cpumask *map = cpu_online_mask; alloc_cpumask_var(&mask, GFP_KERNEL); diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2a2f3c3f6d80..97ec8557f974 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /**** Might be a good idea to set L2DO here - to prevent instructions from getting into the cache. But since we invalidate the next time we enable the cache it doesn't really matter. - Don't do this unless you accomodate all processor variations. + Don't do this unless you accommodate all processor variations. The bit moved on the 7450..... 
****/ diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c834757bebc0..2b97b80d6d7d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) if (!parent) continue; if (of_match_node(legacy_serial_parents, parent) != NULL) { - index = add_legacy_soc_port(np, np); - if (index >= 0 && np == stdout) - legacy_serial_console = index; + if (of_device_is_available(np)) { + index = add_legacy_soc_port(np, np); + if (index >= 0 && np == stdout) + legacy_serial_console = index; + } } of_node_put(parent); } diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 16468362ad57..301db65f05a1 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m) seq_printf(m, "system_active_processors=%d\n", ppp_data.active_system_procs); - /* pool related entries are apropriate for shared configs */ + /* pool related entries are appropriate for shared configs */ if (lppaca_of(0).shared_proc) { unsigned long pool_idle_time, pool_procs; diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f4adf89d7614..10f0aadee95b 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -203,7 +203,7 @@ void __init free_unused_pacas(void) { int new_size; - new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); + new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); if (new_size >= paca_size) return; diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 97e0ae414940..822f63008ae1 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], return 0; } +static u64 check_and_compute_delta(u64 prev, u64 val) +{ + u64 delta = (val - prev) & 0xfffffffful; + + /* + * POWER7 can roll back counter values, if the new value is smaller + * than the previous value it will cause the delta and the counter to + * have bogus values unless we rolled a counter over. If a coutner is + * rolled back, it will be smaller, but within 256, which is the maximum + * number of events to rollback at once. If we dectect a rollback + * return 0. This can lead to a small lack of precision in the + * counters. + */ + if (prev > val && (prev - val) < 256) + delta = 0; + + return delta; +} + static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; @@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); + delta = check_and_compute_delta(prev, val); + if (!delta) + return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); - /* The counters are only 32 bits wide */ - delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } @@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, val = (event->hw.idx == 5) ? 
pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; - delta = (val - prev) & 0xfffffffful; - local64_add(delta, &event->count); + delta = check_and_compute_delta(prev, val); + if (delta) + local64_add(delta, &event->count); } } @@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; - u64 val; + u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; - local64_set(&event->hw.prev_count, val); + prev = local64_read(&event->hw.prev_count); + if (check_and_compute_delta(prev, val)) + local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } @@ -759,7 +782,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) /* * If group events scheduling transaction was started, - * skip the schedulability test here, it will be peformed + * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) @@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); - delta = (val - prev) & 0xfffffffful; + delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index e83ba3f078e4..1b1787d52896 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -15,7 +15,7 @@ /* * Grab the register values as they are now. - * This won't do a particularily good job because we really + * This won't do a particularly good job because we really * want our caller's caller's registers, and our caller has * already executed its prologue. * ToDo: We could reach back into the caller's save area to do diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 05b7139d6a27..e74fa12afc82 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -683,7 +683,7 @@ void __init early_init_devtree(void *params) #endif #ifdef CONFIG_PHYP_DUMP - /* scan tree to see if dump occured during last boot */ + /* scan tree to see if dump occurred during last boot */ of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); #endif @@ -739,7 +739,7 @@ void __init early_init_devtree(void *params) DBG("Scanning CPUs ...\n"); - /* Retreive CPU related informations from the flat tree + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ of_scan_flat_dt(early_init_dt_scan_cpus, NULL); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 895b082f1e48..55613e33e263 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, #ifdef CONFIG_VSX /* * Currently to set and and get all the vsx state, you need to call - * the fp and VMX calls aswell. This only get/sets the lower 32 + * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. 
*/ diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 7980ec0e1e1a..67f6c3b51357 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -465,7 +465,7 @@ static void start_event_scan(void) pr_debug("rtasd: will sleep for %d milliseconds\n", (30000 / rtas_event_scan_rate)); - /* Retreive errors from nvram if any */ + /* Retrieve errors from nvram if any */ retreive_nvram_error_log(); schedule_delayed_work_on(cpumask_first(cpu_online_mask), diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a46647..21f30cb68077 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) */ cpu_init_thread_core_maps(nthreads); + /* Now that possible cpus are set, set nr_cpu_ids for later use */ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + free_unused_pacas(); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 981360509172..cbdbb14be4b0 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,6 +57,25 @@ #define DBG(fmt...) #endif + +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. + */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif + struct thread_info *secondary_ti; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); @@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id) per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); } -static void __init smp_create_idle(unsigned int cpu) -{ - struct task_struct *p; - - /* create a process for the processor */ - p = fork_idle(cpu); - if (IS_ERR(p)) - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); -#ifdef CONFIG_PPC64 - paca[cpu].__current = p; - paca[cpu].kstack = (unsigned long) task_thread_info(p) - + THREAD_SIZE - STACK_FRAME_OVERHEAD; -#endif - current_set[cpu] = task_thread_info(p); - task_thread_info(p)->cpu = cpu; -} - void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = NR_CPUS; else max_cpus = 1; - - for_each_possible_cpu(cpu) - if (cpu != boot_cpuid) - smp_create_idle(cpu); } void __devinit smp_prepare_boot_cpu(void) @@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void) #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU during hotplug phases */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; +static DEFINE_PER_CPU(int, cpu_state) = { 0 }; int generic_cpu_disable(void) { @@ -317,30 +315,8 @@ int generic_cpu_disable(void) set_cpu_online(cpu, false); #ifdef CONFIG_PPC64 vdso_data->processorCount--; - fixup_irqs(cpu_online_mask); -#endif - return 0; -} - -int generic_cpu_enable(unsigned int cpu) -{ - /* Do the normal bootup if we haven't - * already bootstrapped. 
*/ - if (system_state != SYSTEM_RUNNING) - return -ENOSYS; - - /* get the target out of it's holding state */ - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - smp_wmb(); - - while (!cpu_online(cpu)) - cpu_relax(); - -#ifdef CONFIG_PPC64 - fixup_irqs(cpu_online_mask); - /* counter the irq disable in fixup_irqs */ - local_irq_enable(); #endif + migrate_irqs(); return 0; } @@ -362,37 +338,89 @@ void generic_mach_cpu_die(void) unsigned int cpu; local_irq_disable(); + idle_task_exit(); cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); __get_cpu_var(cpu_state) = CPU_DEAD; smp_wmb(); while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); - set_cpu_online(cpu, true); - local_irq_enable(); +} + +void generic_set_cpu_dead(unsigned int cpu) +{ + per_cpu(cpu_state, cpu) = CPU_DEAD; } #endif -static int __devinit cpu_enable(unsigned int cpu) +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) { - if (smp_ops && smp_ops->cpu_enable) - return smp_ops->cpu_enable(cpu); + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +static int __cpuinit create_idle(unsigned int cpu) +{ + struct thread_info *ti; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + + c_idle.idle = get_idle_for_cpu(cpu); + + /* We can't use kernel_thread since we must avoid to + * reschedule the child. We use a workqueue because + * we want to fork from a kernel thread, not whatever + * userspace process happens to be trying to online us. + */ + if (!c_idle.idle) { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } else + init_idle(c_idle.idle, cpu); + if (IS_ERR(c_idle.idle)) { + pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); + return PTR_ERR(c_idle.idle); + } + ti = task_thread_info(c_idle.idle); + +#ifdef CONFIG_PPC64 + paca[cpu].__current = c_idle.idle; + paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; +#endif + ti->cpu = cpu; + current_set[cpu] = ti; - return -ENOSYS; + return 0; } int __cpuinit __cpu_up(unsigned int cpu) { - int c; + int rc, c; secondary_ti = current_set[cpu]; - if (!cpu_enable(cpu)) - return 0; if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; + /* Make sure we have an idle thread */ + rc = create_idle(cpu); + if (rc) + return rc; + /* Make sure callin-map entry is 0 (can be leftover a CPU * hotplug */ @@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. 
*/ -int __devinit start_secondary(void *unused) +void __devinit start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; @@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused) secondary_cpu_time_init(); +#ifdef CONFIG_PPC64 + if (system_state == SYSTEM_RUNNING) + vdso_data->processorCount++; +#endif ipi_call_lock(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused) local_irq_enable(); cpu_idle(); - return 0; + + BUG(); } int setup_profiling_timer(unsigned int multiplier) @@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus) free_cpumask_var(old_mask); + if (smp_ops && smp_ops->bringup_done) + smp_ops->bringup_done(); + dump_numa_cpu_topology(); + } int arch_sd_sibling_asym_packing(void) @@ -660,5 +697,9 @@ void cpu_die(void) { if (ppc_md.cpu_die) ppc_md.cpu_die(); + + /* If we return, we re-enter start_secondary */ + start_secondary_resume(); } + #endif diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e237438..ba4dee3d233f 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Disable MSR:DR to make sure we don't take a TLB or * hash miss during the copy, as our hash table will - * for a while be unuseable. For .text, we assume we are + * for a while be unusable. For .text, we assume we are * covered by a BAT. This works only for non-G5 at this * point. G5 will need a better approach, possibly using * a small temporary hash table filled with large mappings, diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 09d31dbf43f9..f33acfd872ad 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) u64 stolen = 0; u64 dtb; + if (!dtl) + return 0; + if (i == vpa->dtl_idx) return 0; while (i < vpa->dtl_idx) { @@ -356,7 +359,7 @@ void account_system_vtime(struct task_struct *tsk) } get_paca()->user_time_scaled += user_scaled; - if (in_irq() || idle_task(smp_processor_id()) != tsk) { + if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { account_system_time(tsk, 0, delta, sys_scaled); if (stolen) account_steal_time(stolen); @@ -577,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs) struct clock_event_device *evt = &decrementer->event; u64 now; + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continue to take decrementer exceptions. + */ + set_dec(DECREMENTER_MAX); + + /* Some implementations of hotplug will get timer interrupts while + * offline, just ignore these + */ + if (!cpu_online(smp_processor_id())) + return; + trace_timer_interrupt_entry(regs); __get_cpu_var(irq_stat).timer_irqs++; - /* Ensure a positive value is written to the decrementer, or else - * some CPUs will continuue to take decrementer exceptions */ - set_dec(DECREMENTER_MAX); - #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bd74fac169be..5ddb801bc154 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) * ESR_DST (!?) or 0. 
In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a - * pattern to occurences etc. -dgibson 31/Mar/2003 */ + * pattern to occurrences etc. -dgibson 31/Mar/2003 */ switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b33643..baa33a7517bc 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for NS16550 compatable serial ports + * udbg for NS16550 compatible serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dcc..cf0c9c9c24f9 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S @@ -19,7 +19,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by adding a nop before the real start. */ nop diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb4082..45ea281e9a21 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -20,7 +20,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by padding before the real start. */ nop diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 757c0bed9a91..b42f76c4948d 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset, #endif } EXPORT_SYMBOL(__dma_sync_page); + +/* + * Return the PFN for a given cpu virtual address returned by + * __dma_alloc_coherent. This is used by dma_mmap_coherent() + */ +unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr) +{ + /* This should always be populated, so we don't test every + * level. 
If that fails, we'll have a nice crash which + * will be as good as a BUG_ON() + */ + pgd_t *pgd = pgd_offset_k(cpu_addr); + pud_t *pud = pud_offset(pgd, cpu_addr); + pmd_t *pmd = pmd_offset(pud, cpu_addr); + pte_t *ptep = pte_offset_kernel(pmd, cpu_addr); + + if (pte_none(*ptep) || !pte_present(*ptep)) + return 0; + return pte_pfn(*ptep); +} diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 3079f6b44cf5..5b7dd4ea02b5 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -192,8 +192,8 @@ htab_insert_pte: rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -495,8 +495,8 @@ htab_special_pfn: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -813,8 +813,8 @@ ht64_insert_pte: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARM(r9)(r1) /* segment size */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index a5991facddce..58a022d0f463 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -753,7 +753,7 @@ void __cpuinit early_init_mmu_secondary(void) mtspr(SPRN_SDR1, _SDR1); /* Initialize STAB/SLB. 
We use a virtual address as it works - * in real mode on pSeries and we want a virutal address on + * in real mode on pSeries and we want a virtual address on * iSeries anyway */ if (cpu_has_feature(CPU_FTR_SLB)) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index a66499650909..57e545b84bf1 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg) clear_page(page); /* - * We shouldnt have to do this, but some versions of glibc + * We shouldn't have to do this, but some versions of glibc * require it (ld.so assumes zero filled pages are icache clean) * - Anton */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 0dc95c0aa3be..5ec1dad2a19d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -440,11 +440,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) } /* - * Retreive and validate the ibm,dynamic-memory property of the device tree. + * Retrieve and validate the ibm,dynamic-memory property of the device tree. * * The layout of the ibm,dynamic-memory property is a number N of memblock * list entries followed by N memblock list entries. Each memblock list entry - * contains information as layed out in the of_drconf_cell struct above. + * contains information as laid out in the of_drconf_cell struct above. */ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) { @@ -468,7 +468,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) } /* - * Retreive and validate the ibm,lmb-size property for drconf memory + * Retrieve and validate the ibm,lmb-size property for drconf memory * from the device tree. */ static u64 of_get_lmb_size(struct device_node *memory) @@ -490,7 +490,7 @@ struct assoc_arrays { }; /* - * Retreive and validate the list of associativity arrays for drconf + * Retrieve and validate the list of associativity arrays for drconf * memory from the ibm,associativity-lookup-arrays property of the * device tree.. * @@ -604,7 +604,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, * Returns the size the region should have to enforce the memory limit. * This will either be the original value of size, a truncated value, * or zero. If the returned value of size is 0 the region should be - * discarded as it lies wholy above the memory limit. + * discarded as it lies wholly above the memory limit. */ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size) diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 8526bd9d2aa3..af0892209417 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -192,7 +192,7 @@ normal_tlb_miss: or r10,r15,r14 BEGIN_MMU_FTR_SECTION - /* Set the TLB reservation and seach for existing entry. Then load + /* Set the TLB reservation and search for existing entry. Then load * the entry. */ PPC_TLBSRX_DOT(0,r16) @@ -425,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) virt_page_table_tlb_miss_fault: /* If we fault here, things are a little bit tricky. We need to call - * either data or instruction store fault, and we need to retreive + * either data or instruction store fault, and we need to retrieve * the original fault address and ESR (for data). 
* * The thing is, we know that in normal circumstances, this is * always called as a second level tlb miss for SW load or as a first * level TLB miss for HW load, so we should be able to peek at the - * relevant informations in the first exception frame in the PACA. + * relevant information in the first exception frame in the PACA. * * However, we do need to double check that, because we may just hit * a stray kernel pointer or a userland attack trying to hit those diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index c4d2b7167568..cb515cff745c 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -67,7 +67,7 @@ #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ -/* Minumum HW interval timer setting to send value to trace buffer is 10 cycle. +/* Minimum HW interval timer setting to send value to trace buffer is 10 cycle. * To configure counter to send value every N cycles set counter to * 2^32 - 1 - N. */ @@ -1470,7 +1470,7 @@ static int cell_global_start(struct op_counter_config *ctr) * trace buffer at the maximum rate possible. The trace buffer is configured * to store the PCs, wrapping when it is full. The performance counter is * initialized to the max hardware count minus the number of events, N, between - * samples. Once the N events have occured, a HW counter overflow occurs + * samples. Once the N events have occurred, a HW counter overflow occurs * causing the generation of a HW counter interrupt which also stops the * writing of the SPU PC values to the trace buffer. Hence the last PC * written to the trace buffer is the SPU PC that we want. Unfortunately, @@ -1656,7 +1656,7 @@ static void cell_handle_interrupt_ppu(struct pt_regs *regs, * The counters were frozen by the interrupt. * Reenable the interrupt and restart the counters. * If there was a race between the interrupt handler and - * the virtual counter routine. The virutal counter + * the virtual counter routine. The virtual counter * routine may have cleared the interrupts. Hence must * use the virt_cntr_inter_mask to re-enable the interrupts. */ diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 80774092db77..8ee51a252cf1 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -207,7 +207,7 @@ static unsigned long get_pc(struct pt_regs *regs) unsigned long mmcra; unsigned long slot; - /* Cant do much about it */ + /* Can't do much about it */ if (!cur_cpu_spec->oprofile_mmcra_sihv) return pc; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 6385d883cb8d..9940ce8a2d4e 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { static struct mpc52xx_lpbfifo lpbfifo; /** - * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered + * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { @@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * * On transmit, the dma completion irq triggers before the fifo completion * triggers. 
Handle the dma completion here instead of the LPB FIFO Bestcomm - * task completion irq becuase everyting is not really done until the LPB FIFO + * task completion irq because everything is not really done until the LPB FIFO * completion irq triggers. * * In other words: @@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * Exit conditions: * 1) Transfer aborted * 2) FIFO complete without DMA; more data to do - * 3) FIFO complete without DMA; all data transfered + * 3) FIFO complete without DMA; all data transferred * 4) FIFO complete using DMA * * Condition 1 can occur regardless of whether or not DMA is used. diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 3ddea96273ca..1dd15400f6f0 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -512,7 +512,7 @@ void __init mpc52xx_init_irq(void) /** * mpc52xx_get_irq - Get pending interrupt number hook function * - * Called by the interupt handler to determine what IRQ handler needs to be + * Called by the interrupt handler to determine what IRQ handler needs to be * executed. * * Status of pending interrupts is determined by reading the encoded status diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 20576829eca5..f7b07720aa30 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -46,7 +46,7 @@ config PPC_OF_BOOT_TRAMPOLINE help Support from booting from Open Firmware or yaboot using an Open Firmware client interface. This enables the kernel to - communicate with open firmware to retrieve system informations + communicate with open firmware to retrieve system information such as the device tree. In case of doubt, say Y diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index a19bec078703..44cfd1bef89b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -244,7 +244,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, break; case IIC_IRQ_TYPE_IOEXC: irq_set_chip_and_handler(virq, &iic_ioexc_chip, - handle_iic_irq); + handle_edge_eoi_irq); break; default: irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index 3b894f585280..147069938cfe 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) */ for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { /* XXX This is likely to fail, we should use a special pool - * similiar to what hugetlbfs does. + * similar to what hugetlbfs does. 
*/ csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, SPU_64K_PAGE_ORDER); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b0466284932..65203857b0ce 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { - /* XXX(hch): check for affinity here aswell */ + /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88b..72c905f1ee7a 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c @@ -284,7 +284,7 @@ static inline void restore_complete(void) exit_instrs[3] = BR_INSTR; break; default: - /* SPU_Status[R]=1. No additonal instructions. */ + /* SPU_Status[R]=1. No additional instructions. */ break; } spu_sync(); diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index b5e026bdca21..62dabe3c2bfa 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -51,7 +51,7 @@ static int mf_initialized; /* - * This is the structure layout for the Machine Facilites LPAR event + * This is the structure layout for the Machine Facilities LPAR event * flows. */ struct vsp_cmd_data { diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a90..2376069cdc14 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, source inst (%d) doesnt match (%d)\n", + "int msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xSourceInstanceId); return; @@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, target inst (%d) doesnt match (%d)\n", + "int msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xTargetInstanceId); return; @@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "ack msg rcvd, source inst (%d) doesnt match (%d)\n", + "ack msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xSourceInstanceId); return; @@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. 
" - "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", + "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xTargetInstanceId); return; diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f91..321a9b3a2d00 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, } EXPORT_SYMBOL(pasemi_dma_free_buf); -/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization +/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization * - * Allocates a flag for use with channel syncronization (event descriptors). + * Allocates a flag for use with channel synchronization (event descriptors). * Returns allocated flag (0-63), < 0 on error. */ int pasemi_dma_alloc_flag(void) diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f169392551..ea47df66fee5 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o -# CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really +# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9a..e9c8a607268e 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) printk(KERN_INFO "SMU i2c %s\n", controller->full_name); /* Look for childs, note that they might not be of the right - * type as older device trees mix i2c busses and other thigns + * type as older device trees mix i2c busses and other things * at the same level */ for (busnode = NULL; diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index ab6898942700..f33e08d573ce 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, * This function deals with some "special cases" devices. 
* * 0 -> No special case - * 1 -> Skip the device but act as if the access was successfull + * 1 -> Skip the device but act as if the access was successful * (return 0xff's on reads, eventually, cache config space * accesses in a later version) * -1 -> Hide the device (unsuccessful access) diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f0..20468f49aec0 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); -extern void pmac32_cpu_die(void); extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index d5aceb7fb125..aa45281bd296 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } - -#ifdef CONFIG_HOTPLUG_CPU -/* access per cpu vars from generic smp.c */ -DECLARE_PER_CPU(int, cpu_state); - -static void pmac64_cpu_die(void) -{ - /* - * turn off as much as possible, we'll be - * kicked out as this will only be invoked - * on core99 platforms for now ... - */ - - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; - smp_wmb(); - - /* - * during the path that leads here preemption is disabled, - * reenable it now so that when coming up preempt count is - * zero correctly - */ - preempt_enable(); - - /* - * hard-disable interrupts for the non-NAP case, the NAP code - * needs to re-enable interrupts (but soft-disables them) - */ - hard_irq_disable(); - - while (1) { - /* let's not take timer interrupts too often ... 
*/ - set_dec(0x7fffffff); - - /* should always be true at this point */ - if (cpu_has_feature(CPU_FTR_CAN_NAP)) - power4_cpu_offline_powersave(); - else { - HMT_low(); - HMT_very_low(); - } - } -} -#endif /* CONFIG_HOTPLUG_CPU */ - #endif /* CONFIG_PPC64 */ define_machine(powermac) { @@ -726,15 +681,4 @@ define_machine(powermac) { .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif -#ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_PPC64 - .cpu_die = pmac64_cpu_die, -#endif -#ifdef CONFIG_PPC32 - .cpu_die = pmac32_cpu_die, -#endif -#endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) - .cpu_die = generic_mach_cpu_die, -#endif }; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b6..bc5f0dc6ae1e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -840,92 +840,151 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) /* Setup openpic */ mpic_setup_this_cpu(); +} - if (cpu_nr == 0) { #ifdef CONFIG_PPC64 - extern void g5_phy_disable_cpu1(void); +#ifdef CONFIG_HOTPLUG_CPU +static int smp_core99_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int rc; - /* Close i2c bus if it was used for tb sync */ + switch(action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + /* Open i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) { - pmac_i2c_close(pmac_tb_clock_chip_host); - pmac_tb_clock_chip_host = NULL; + rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); + if (rc) { + pr_err("Failed to open i2c bus for time sync\n"); + return notifier_from_errno(rc); + } } + break; + case CPU_ONLINE: + case CPU_UP_CANCELED: + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + break; + default: + break; + } + return NOTIFY_OK; +} - /* If we didn't start the second CPU, we must take - * it off the bus - */ - if (of_machine_is_compatible("MacRISC4") && - num_online_cpus() < 2) - g5_phy_disable_cpu1(); -#endif /* CONFIG_PPC64 */ +static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { + .notifier_call = smp_core99_cpu_notify, +}; +#endif /* CONFIG_HOTPLUG_CPU */ + +static void __init smp_core99_bringup_done(void) +{ + extern void g5_phy_disable_cpu1(void); - if (ppc_md.progress) - ppc_md.progress("core99_setup_cpu 0 done", 0x349); + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + + /* If we didn't start the second CPU, we must take + * it off the bus. 
+ */ + if (of_machine_is_compatible("MacRISC4") && + num_online_cpus() < 2) { + set_cpu_present(1, false); + g5_phy_disable_cpu1(); } -} +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&smp_core99_cpu_nb); +#endif + if (ppc_md.progress) + ppc_md.progress("smp_core99_bringup_done", 0x349); +} +#endif /* CONFIG_PPC64 */ -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) +#ifdef CONFIG_HOTPLUG_CPU -int smp_core99_cpu_disable(void) +static int smp_core99_cpu_disable(void) { - set_cpu_online(smp_processor_id(), false); + int rc = generic_cpu_disable(); + if (rc) + return rc; - /* XXX reset cpu affinity here */ mpic_cpu_set_priority(0xf); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - mb(); - udelay(20); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); + return 0; } -static int cpu_dead[NR_CPUS]; +#ifdef CONFIG_PPC32 -void pmac32_cpu_die(void) +static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); - cpu_dead[smp_processor_id()] = 1; + idle_task_exit(); + pr_debug("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); mb(); low_cpu_die(); } -void smp_core99_cpu_die(unsigned int cpu) +#else /* CONFIG_PPC32 */ + +static void pmac_cpu_die(void) { - int timeout; + int cpu = smp_processor_id(); - timeout = 1000; - while (!cpu_dead[cpu]) { - if (--timeout == 0) { - printk("CPU %u refused to die!\n", cpu); - break; - } - msleep(1); + local_irq_disable(); + idle_task_exit(); + + /* + * turn off as much as possible, we'll be + * kicked out as this will only be invoked + * on core99 platforms for now ... + */ + + printk(KERN_INFO "CPU#%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); + + /* + * Re-enable interrupts. The NAP code needs to enable them + * anyways, do it now so we deal with the case where one already + * happened while soft-disabled. + * We shouldn't get any external interrupts, only decrementer, and the + * decrementer handler is safe for use on offline CPUs + */ + local_irq_enable(); + + while (1) { + /* let's not take timer interrupts too often ... */ + set_dec(0x7fffffff); + + /* Enter NAP mode */ + power4_idle(); } - cpu_dead[cpu] = 0; } -#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ +#endif /* else CONFIG_PPC32 */ +#endif /* CONFIG_HOTPLUG_CPU */ /* Core99 Macs (dual G4s and G5s) */ struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, +#ifdef CONFIG_PPC64 + .bringup_done = smp_core99_bringup_done, +#endif .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, .take_timebase = smp_core99_take_timebase, #if defined(CONFIG_HOTPLUG_CPU) -# if defined(CONFIG_PPC32) .cpu_disable = smp_core99_cpu_disable, - .cpu_die = smp_core99_cpu_die, -# endif -# if defined(CONFIG_PPC64) - .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, - /* intentionally do *NOT* assign cpu_enable, - * the generic code will use kick_cpu then! 
*/ -# endif #endif }; @@ -957,5 +1016,10 @@ void __init pmac_setup_smp(void) smp_ops = &psurge_smp_ops; } #endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_HOTPLUG_CPU + ppc_md.cpu_die = pmac_cpu_die; +#endif } + diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b74a9230edc9..57ceb92b2288 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -74,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) return NULL; /* The configure connector reported name does not contain a - * preceeding '/', so we allocate a buffer large enough to + * preceding '/', so we allocate a buffer large enough to * prepend this to the full_name. */ name = (char *)ccwa + ccwa->name_offset; diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 3cc4d102b1f1..89649173d3a3 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -65,7 +65,7 @@ * with EEH. * * Ideally, a PCI device driver, when suspecting that an isolation - * event has occured (e.g. by reading 0xff's), will then ask EEH + * event has occurred (e.g. by reading 0xff's), will then ask EEH * whether this is the case, and then take appropriate steps to * reset the PCI slot, the PCI device, and then resume operations. * However, until that day, the checking is done here, with the diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac1..ef8c45489e20 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -216,7 +216,7 @@ static void pseries_cpu_die(unsigned int cpu) cpu, pcpu, cpu_status); } - /* Isolation and deallocation are definatly done by + /* Isolation and deallocation are definitely done by * drslot_chrp_cpu. If they were not they would be * done here. Change isolate state to Isolate and * change allocation-state to Unusable. diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 154c464cdca5..6d5412a18b26 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -272,7 +272,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) return tce_ret; } -/* this is compatable with cells for the device tree property */ +/* this is compatible with cells for the device tree property */ struct dynamic_dma_window_prop { __be32 liobn; /* tce table number */ __be64 dma_base; /* address hi,lo */ @@ -976,7 +976,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might - * contain the dma-window properties per-device and not neccesarily + * contain the dma-window properties per-device and not necessarily * for the bus. So we need to search upwards in the tree until we * either hit a dma-window property, OR find a parent with a table * already allocated. @@ -1033,7 +1033,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) /* * the device tree might contain the dma-window properties - * per-device and not neccesarily for the bus. So we need to + * per-device and not necessarily for the bus. So we need to * search upwards in the tree until we either hit a dma-window * property, OR find a parent with a table already allocated. 
*/ diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 419707b07248..00cc3a094885 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -480,8 +480,32 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, const char *new_msgs, unsigned long new_len) { static unsigned int oops_count = 0; + static bool panicking = false; size_t text_len; + switch (reason) { + case KMSG_DUMP_RESTART: + case KMSG_DUMP_HALT: + case KMSG_DUMP_POWEROFF: + /* These are almost always orderly shutdowns. */ + return; + case KMSG_DUMP_OOPS: + case KMSG_DUMP_KEXEC: + break; + case KMSG_DUMP_PANIC: + panicking = true; + break; + case KMSG_DUMP_EMERG: + if (panicking) + /* Panic report already captured. */ + return; + break; + default: + pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", + __FUNCTION__, (int) reason); + return; + } + if (clobbering_unread_rtas_event()) return; diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) #endif extern enum cpu_state_vals get_preferred_offline_state(int cpu); -extern int start_secondary(void); -extern void start_secondary_resume(void); #endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c319d04aa799..6c42cfde8415 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void) int cpu, ret; struct paca_struct *pp; struct dtl_entry *dtl; + struct kmem_cache *dtl_cache; if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return 0; + dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, NULL); + if (!dtl_cache) { + pr_warn("Failed to create dispatch trace log buffer cache\n"); + pr_warn("Stolen time statistics will be unreliable\n"); + return 0; + } + for_each_possible_cpu(cpu) { pp = &paca[cpu]; - dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, - cpu_to_node(cpu)); + dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); if (!dtl) { pr_warn("Failed to allocate dispatch trace log for cpu %d\n", cpu); @@ -378,7 +386,7 @@ static int __init pSeries_init_panel(void) return 0; } -arch_initcall(pSeries_init_panel); +machine_arch_initcall(pseries, pSeries_init_panel); static int pseries_set_dabr(unsigned long dabr) { diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c6..a509c5292a67 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -64,8 +64,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) int qcss_tok = rtas_token("query-cpu-stopped-state"); if (qcss_tok == RTAS_UNKNOWN_SERVICE) { - printk(KERN_INFO "Firmware doesn't support " - "query-cpu-stopped-state\n"); + printk_once(KERN_INFO + "Firmware doesn't support query-cpu-stopped-state\n"); return QCSS_HARDWARE_ERROR; } @@ -112,10 +112,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; - +#ifdef CONFIG_HOTPLUG_CPU if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) goto out; - +#endif /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. 
@@ -130,7 +130,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 0; } +#ifdef CONFIG_HOTPLUG_CPU out: +#endif return 1; } @@ -144,16 +146,15 @@ static void __devinit smp_xics_setup_cpu(int cpu) vpa_init(cpu); cpumask_clear_cpu(cpu, of_spin_mask); +#ifdef CONFIG_HOTPLUG_CPU set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); - +#endif } #endif /* CONFIG_XICS */ static void __devinit smp_pSeries_kick_cpu(int nr) { - long rc; - unsigned long hcpuid; BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) @@ -165,16 +166,20 @@ static void __devinit smp_pSeries_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; - +#ifdef CONFIG_HOTPLUG_CPU set_preferred_offline_state(nr, CPU_STATE_ONLINE); if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { + long rc; + unsigned long hcpuid; + hcpuid = get_hard_smp_processor_id(nr); rc = plpar_hcall_norets(H_PROD, hcpuid); if (rc != H_SUCCESS) printk(KERN_ERR "Error: Prod to wake up processor %d " "Ret= %ld\n", nr, rc); } +#endif } static int smp_pSeries_cpu_bootable(unsigned int nr) diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 6c1e638f0ce9..d6901334d66e 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -204,33 +204,33 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, static void xics_unmask_irq(struct irq_data *d) { - unsigned int irq; + unsigned int hwirq; int call_status; int server; pr_devel("xics: unmask virq %d\n", d->irq); - irq = (unsigned int)irq_map[d->irq].hwirq; - pr_devel(" -> map to hwirq 0x%x\n", irq); - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + pr_devel(" -> map to hwirq 0x%x\n", hwirq); + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; server = get_irq_server(d->irq, d->affinity, 0); - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", - __func__, irq, server, call_status); + __func__, hwirq, server, call_status); return; } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", - __func__, irq, call_status); + __func__, hwirq, call_status); return; } } @@ -250,46 +250,46 @@ static unsigned int xics_startup(struct irq_data *d) return 0; } -static void xics_mask_real_irq(struct irq_data *d) +static void xics_mask_real_irq(unsigned int hwirq) { int call_status; - if (d->irq == XICS_IPI) + if (hwirq == XICS_IPI) return; - call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", - __func__, d->irq, call_status); + __func__, hwirq, call_status); return; } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", - __func__, d->irq, call_status); + __func__, hwirq, call_status); return; } } static void xics_mask_irq(struct 
irq_data *d) { - unsigned int irq; + unsigned int hwirq; pr_devel("xics: mask virq %d\n", d->irq); - irq = (unsigned int)irq_map[d->irq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; - xics_mask_real_irq(d); + xics_mask_real_irq(hwirq); } static void xics_mask_unknown_vec(unsigned int vec) { printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); - xics_mask_real_irq(irq_get_irq_data(vec)); + xics_mask_real_irq(vec); } static inline unsigned int xics_xirr_vector(unsigned int xirr) @@ -373,37 +373,37 @@ static unsigned char pop_cppr(void) static void xics_eoi_direct(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - direct_xirr_info_set((pop_cppr() << 24) | irq); + direct_xirr_info_set((pop_cppr() << 24) | hwirq); } static void xics_eoi_lpar(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - lpar_xirr_info_set((pop_cppr() << 24) | irq); + lpar_xirr_info_set((pop_cppr() << 24) | hwirq); } static int xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { - unsigned int irq; + unsigned int hwirq; int status; int xics_status[2]; int irq_server; - irq = (unsigned int)irq_map[d->irq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } @@ -418,11 +418,11 @@ xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) } status = rtas_call(ibm_set_xive, 3, 1, NULL, - irq, irq_server, xics_status[1]); + hwirq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } @@ -874,7 +874,7 @@ void xics_kexec_teardown_cpu(int secondary) void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); - unsigned int irq, virq; + int virq; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == default_server) @@ -892,18 +892,19 @@ void xics_migrate_irqs_away(void) for_each_irq(virq) { struct irq_desc *desc; struct irq_chip *chip; + unsigned int hwirq; int xics_status[2]; int status; unsigned long flags; - /* We cant set affinity on ISA interrupts */ + /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; if (irq_map[virq].host != xics_host) continue; - irq = (unsigned int)irq_map[virq].hwirq; + hwirq = (unsigned int)irq_map[virq].hwirq; /* We need to get IPIs still. 
*/ - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) continue; desc = irq_to_desc(virq); @@ -918,10 +919,10 @@ void xics_migrate_irqs_away(void) raw_spin_lock_irqsave(&desc->lock, flags); - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); goto unlock; } diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 27402c7d309d..1636dd896707 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -95,7 +95,7 @@ axon_ram_irq_handler(int irq, void *dev) BUG_ON(!bank); - dev_err(&device->dev, "Correctable memory error occured\n"); + dev_err(&device->dev, "Correctable memory error occurred\n"); bank->ecc_counter++; return IRQ_HANDLED; } diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h index 23a95f80dfdb..a0e2e6b19b57 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h @@ -20,7 +20,7 @@ * struct bcom_bd - Structure describing a generic BestComm buffer descriptor * @status: The current status of this buffer. Exact meaning depends on the * task type - * @data: An array of u32 extra data. Size of array is task dependant. + * @data: An array of u32 extra data. Size of array is task dependent. * * Note: Don't dereference a bcom_bd pointer as an array. The size of the * bcom_bd is variable. Use bcom_get_bd() instead. diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h index eb0d1c883c31..3b52f3ffbdf8 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h @@ -97,7 +97,7 @@ struct bcom_task_header { u8 reserved[8]; }; -/* Descriptors stucture & co */ +/* Descriptors structure & co */ #define BCOM_DESC_NOP 0x000001f8 #define BCOM_LCD_MASK 0x80000000 #define BCOM_DRD_EXTENDED 0x40000000 diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 8b5aba263323..e0bc944eb23f 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -223,7 +223,7 @@ void __init cpm_reset(void) /* Set SDMA Bus Request priority 5. * On 860T, this also enables FEC priority 6. I am not sure - * this is what we realy want for some applications, but the + * this is what we really want for some applications, but the * manual recommends it. * Bit 25, FAM can also be set to use FEC aggressive mode (860T). 
*/ diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index f8f7f28c6343..68ca9290df94 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) struct resource rsrc; const int *bus_range; + if (!of_device_is_available(dev)) { + pr_warning("%s: disabled\n", dev->full_name); + return -ENODEV; + } + pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) port->ops = ops; port->priv = priv; port->phys_efptr = 0x100; - rio_register_mport(port); priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); rio_regs_win = priv->regs_win; @@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 65536 : 256); + if (rio_register_mport(port)) + goto err; + if (port->host_deviceid >= 0) out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c index 7ed809676642..82fdad885d20 100644 --- a/arch/powerpc/sysdev/indirect_pci.c +++ b/arch/powerpc/sysdev/indirect_pci.c @@ -117,7 +117,7 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | (devfn << 8) | reg | cfg_type)); - /* surpress setting of PCI_PRIMARY_BUS */ + /* suppress setting of PCI_PRIMARY_BUS */ if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) if ((offset == PCI_PRIMARY_BUS) && (bus->number == hose->first_busno)) diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index f550e23632f8..a88800ff4d01 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -80,7 +80,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) if ((hw & 1) == 0) { siel |= (0x80000000 >> hw); out_be32(&siu_reg->sc_siel, siel); - __irq_set_handler_locked(irq, handle_edge_irq); + __irq_set_handler_locked(d->irq, handle_edge_irq); } } return 0; diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h index 56d9e5deccbf..c39a134e8684 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.h +++ b/arch/powerpc/sysdev/ppc4xx_pci.h @@ -324,7 +324,7 @@ #define PESDR0_460EX_IHS2 0x036D /* - * 460SX addtional DCRs + * 460SX additional DCRs */ #define PESDRn_460SX_RCEI 0x02 |
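
The smp_setup_cpu_maps() hunk above (arch/powerpc/kernel/setup-common.c) derives nr_cpu_ids from the highest bit set in cpu_possible_mask rather than from the count of possible CPUs. Below is a minimal userspace sketch of that calculation, not kernel code: the bitmap helpers are simplified stand-ins for the kernel's find_last_bit()/cpumask API, and the CPU numbers are made up for illustration.

/* Standalone illustration of:
 *   nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
 */
#include <stdio.h>
#include <limits.h>

#define NR_CPUS 128
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long cpu_possible_mask[BITS_TO_LONGS(NR_CPUS)];

static void set_bit(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Mimics the kernel helper: highest set bit below 'size', or 'size' if none. */
static unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long i;

	for (i = size; i-- > 0; )
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

int main(void)
{
	unsigned long nr_cpu_ids;
	unsigned int cpu;

	/* Pretend firmware reported CPUs 0-3 and 8-11 as possible (sparse IDs). */
	for (cpu = 0; cpu < 4; cpu++)
		set_bit(cpu, cpu_possible_mask);
	for (cpu = 8; cpu < 12; cpu++)
		set_bit(cpu, cpu_possible_mask);

	/* Same expression as the patch: highest possible CPU number plus one. */
	nr_cpu_ids = find_last_bit(cpu_possible_mask, NR_CPUS) + 1;

	printf("nr_cpu_ids = %lu\n", nr_cpu_ids);	/* prints 12, not 8 */
	return 0;
}

The point of the "+ 1" form is visible in the output: with a sparse possible mask, nr_cpu_ids must cover the highest CPU number (12 here), not the number of possible CPUs (8).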
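
The oops_to_nvram() hunk above (arch/powerpc/platforms/pseries/nvram.c) filters kmsg-dump reasons so that orderly shutdowns are ignored and a later EMERG pass cannot clobber a panic report that has already been captured. The sketch below is a userspace illustration of that decision logic only, not the kernel code: the enum values and function names are local stand-ins for the kernel's KMSG_DUMP_* constants, and the KEXEC case from the patch is omitted.

#include <stdbool.h>
#include <stdio.h>

enum dump_reason {
	DUMP_OOPS, DUMP_PANIC, DUMP_EMERG,
	DUMP_RESTART, DUMP_HALT, DUMP_POWEROFF,
};

static bool should_capture(enum dump_reason reason, bool *panicking)
{
	switch (reason) {
	case DUMP_RESTART:
	case DUMP_HALT:
	case DUMP_POWEROFF:
		return false;		/* orderly shutdown, nothing worth saving */
	case DUMP_OOPS:
		return true;
	case DUMP_PANIC:
		*panicking = true;	/* remember the panic report was captured */
		return true;
	case DUMP_EMERG:
		return !*panicking;	/* don't overwrite an existing panic report */
	}
	return false;			/* unrecognized reason: ignore it */
}

int main(void)
{
	static const enum dump_reason seq[] = {
		DUMP_OOPS, DUMP_PANIC, DUMP_EMERG, DUMP_RESTART,
	};
	static const char *const name[] = {
		"OOPS", "PANIC", "EMERG", "RESTART", "HALT", "POWEROFF",
	};
	bool panicking = false;
	unsigned int i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		printf("%-8s -> %s\n", name[seq[i]],
		       should_capture(seq[i], &panicking) ? "capture" : "skip");
	return 0;
}

Running this prints capture for OOPS and PANIC, then skip for the EMERG that follows the panic and for the RESTART, which is the ordering property the static "panicking" flag in the patch is there to enforce.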