Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/align.c | 189
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 5
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power4.S | 17
-rw-r--r--  arch/powerpc/kernel/cputable.c | 137
-rw-r--r--  arch/powerpc/kernel/crash.c | 13
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 11
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 2
-rw-r--r--  arch/powerpc/kernel/fpu.S | 6
-rw-r--r--  arch/powerpc/kernel/head_32.S | 14
-rw-r--r--  arch/powerpc/kernel/head_64.S | 29
-rw-r--r--  arch/powerpc/kernel/iomap.c | 2
-rw-r--r--  arch/powerpc/kernel/iommu.c | 23
-rw-r--r--  arch/powerpc/kernel/irq.c | 29
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 4
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 99
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 5
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 8
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 63
-rw-r--r--  arch/powerpc/kernel/pci_direct_iommu.c | 18
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 6
-rw-r--r--  arch/powerpc/kernel/pci_iommu.c | 41
-rw-r--r--  arch/powerpc/kernel/proc_ppc64.c | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 55
-rw-r--r--  arch/powerpc/kernel/prom.c | 145
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 130
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 25
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 2
-rw-r--r--  arch/powerpc/kernel/rtas-rtc.c | 30
-rw-r--r--  arch/powerpc/kernel/rtas.c | 108
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 25
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 4
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 20
-rw-r--r--  arch/powerpc/kernel/setup.h | 3
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 18
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 31
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 34
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 18
-rw-r--r--  arch/powerpc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/kernel/systbl.S | 311
-rw-r--r--  arch/powerpc/kernel/time.c | 67
-rw-r--r--  arch/powerpc/kernel/traps.c | 23
-rw-r--r--  arch/powerpc/kernel/udbg.c | 7
-rw-r--r--  arch/powerpc/kernel/vdso.c | 57
-rw-r--r--  arch/powerpc/kernel/vector.S | 4
-rw-r--r--  arch/powerpc/kernel/vio.c | 344
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 5
49 files changed, 1229 insertions, 970 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index faaec9c6f78f..4734b5de599d 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -35,17 +35,19 @@ struct aligninfo {
#define INVALID { 0, 0 }
-#define LD 1 /* load */
-#define ST 2 /* store */
-#define SE 4 /* sign-extend value */
-#define F 8 /* to/from fp regs */
-#define U 0x10 /* update index register */
-#define M 0x20 /* multiple load/store */
-#define SW 0x40 /* byte swap int or ... */
-#define S 0x40 /* ... single-precision fp */
-#define SX 0x40 /* byte count in XER */
+/* Bits in the flags field */
+#define LD 0 /* load */
+#define ST 1 /* store */
+#define SE 2 /* sign-extend value */
+#define F 4 /* to/from fp regs */
+#define U 8 /* update index register */
+#define M 0x10 /* multiple load/store */
+#define SW 0x20 /* byte swap */
+#define S 0x40 /* single-precision fp or... */
+#define SX 0x40 /* ... byte count in XER */
#define HARD 0x80 /* string, stwcx. */
+/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
+#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
- unsigned int flags, unsigned int instr)
+ unsigned int flags, unsigned int instr,
+ unsigned long swiz)
{
unsigned long *rptr;
- unsigned int nb0, i;
+ unsigned int nb0, i, bswiz;
+ unsigned long p;
/*
* We do not try to emulate 8 bytes multiple as they aren't really
@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
if (nb == 0)
return 1;
} else {
- if (__get_user(instr,
- (unsigned int __user *)regs->nip))
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+ if (__get_user(instr, (unsigned int __user *)pc))
return -EFAULT;
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
if (nb == 0)
nb = 32;
@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
return -EFAULT; /* bad address */
rptr = &regs->gpr[reg];
- if (flags & LD) {
+ p = (unsigned long) addr;
+ bswiz = (flags & SW)? 3: 0;
+
+ if (!(flags & ST)) {
/*
* This zeroes the top 4 bytes of the affected registers
* in 64-bit mode, and also zeroes out any remaining
@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
memset(&regs->gpr[0], 0,
((nb0 + 3) / 4) * sizeof(unsigned long));
- for (i = 0; i < nb; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__get_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__get_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
- for (i = 0; i < nb; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
- for (i = 0; i < nb0; ++i)
- if (__put_user(REG_BYTE(rptr, i), addr + i))
+ for (i = 0; i < nb0; ++i, ++p)
+ if (__put_user(REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int reg, areg;
unsigned int dsisr;
unsigned char __user *addr;
- unsigned char __user *p;
+ unsigned long p, swiz;
int ret, t;
union {
u64 ll;
@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
* let's make one up from the instruction
*/
if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
- unsigned int real_instr;
- if (unlikely(__get_user(real_instr,
- (unsigned int __user *)regs->nip)))
+ unsigned long pc = regs->nip;
+
+ if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
+ pc ^= 4;
+ if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
return -EFAULT;
- dsisr = make_dsisr(real_instr);
+ if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
+ instr = cpu_to_le32(instr);
+ dsisr = make_dsisr(instr);
}
/* extract the operation and registers from the dsisr */
@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
+ flags ^= SW;
+ /*
+ * So-called "PowerPC little endian" mode works by
+ * swizzling addresses rather than by actually doing
+ * any byte-swapping. To emulate this, we XOR each
+ * byte address with 7. We also byte-swap, because
+ * the processor's address swizzling depends on the
+ * operand size (it xors the address with 7 for bytes,
+ * 6 for halfwords, 4 for words, 0 for doublewords) but
+ * we will xor with 7 and load/store each byte separately.
+ */
+ if (cpu_has_feature(CPU_FTR_PPC_LE))
+ swiz = 7;
+ }
+
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
* function
*/
if (flags & M)
- return emulate_multiple(regs, addr, reg, nb, flags, instr);
+ return emulate_multiple(regs, addr, reg, nb,
+ flags, instr, swiz);
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
/* If we are loading, get the data from user space, else
* get it from register values
*/
- if (flags & LD) {
+ if (!(flags & ST)) {
data.ll = 0;
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __get_user(data.v[0], p++);
- ret |= __get_user(data.v[1], p++);
- ret |= __get_user(data.v[2], p++);
- ret |= __get_user(data.v[3], p++);
+ ret |= __get_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __get_user(data.v[4], p++);
- ret |= __get_user(data.v[5], p++);
+ ret |= __get_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __get_user(data.v[6], p++);
- ret |= __get_user(data.v[7], p++);
+ ret |= __get_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __get_user(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret))
return -EFAULT;
}
- } else if (flags & F)
+ } else if (flags & F) {
data.dd = current->thread.fpr[reg];
- else
+ if (flags & S) {
+ /* Single-precision FP store requires conversion... */
+#ifdef CONFIG_PPC_FPU
+ preempt_disable();
+ enable_kernel_fp();
+ cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+ preempt_enable();
+#else
+ return 0;
+#endif
+ }
+ } else
data.ll = regs->gpr[reg];
- /* Perform other misc operations like sign extension, byteswap,
+ if (flags & SW) {
+ switch (nb) {
+ case 8:
+ SWAP(data.v[0], data.v[7]);
+ SWAP(data.v[1], data.v[6]);
+ SWAP(data.v[2], data.v[5]);
+ SWAP(data.v[3], data.v[4]);
+ break;
+ case 4:
+ SWAP(data.v[4], data.v[7]);
+ SWAP(data.v[5], data.v[6]);
+ break;
+ case 2:
+ SWAP(data.v[6], data.v[7]);
+ break;
+ }
+ }
+
+ /* Perform other misc operations like sign extension
* or floating point single precision conversion
*/
- switch (flags & ~U) {
+ switch (flags & ~(U|SW)) {
case LD+SE: /* sign extend */
if ( nb == 2 )
data.ll = data.x16.low16;
else /* nb must be 4 */
data.ll = data.x32.low32;
break;
- case LD+S: /* byte-swap */
- case ST+S:
- if (nb == 2) {
- SWAP(data.v[6], data.v[7]);
- } else {
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
- }
- break;
- /* Single-precision FP load and store require conversions... */
+ /* Single-precision FP load requires conversion... */
case LD+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
@@ -486,34 +543,24 @@ int fix_alignment(struct pt_regs *regs)
return 0;
#endif
break;
- case ST+F+S:
-#ifdef CONFIG_PPC_FPU
- preempt_disable();
- enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
- preempt_enable();
-#else
- return 0;
-#endif
- break;
}
/* Store result to memory or update registers */
if (flags & ST) {
ret = 0;
- p = addr;
+ p = (unsigned long) addr;
switch (nb) {
case 8:
- ret |= __put_user(data.v[0], p++);
- ret |= __put_user(data.v[1], p++);
- ret |= __put_user(data.v[2], p++);
- ret |= __put_user(data.v[3], p++);
+ ret |= __put_user(data.v[0], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[1], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[2], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __put_user(data.v[4], p++);
- ret |= __put_user(data.v[5], p++);
+ ret |= __put_user(data.v[4], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[5], SWIZ_PTR(p++));
case 2:
- ret |= __put_user(data.v[6], p++);
- ret |= __put_user(data.v[7], p++);
+ ret |= __put_user(data.v[6], SWIZ_PTR(p++));
+ ret |= __put_user(data.v[7], SWIZ_PTR(p++));
}
if (unlikely(ret))
return -EFAULT;
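
The comment added in the hunk above describes the "PowerPC little endian" address swizzle that SWIZ_PTR() emulates. The following stand-alone C sketch is not part of the patch (the buffer and helper names are invented); it only checks, for aligned 2- and 4-byte accesses, that XOR-ing each byte address with 7 and then byte-swapping matches the hardware's size-dependent swizzle of 7/6/4/0:

/*
 * Illustrative sketch only -- not kernel code.  It checks, for aligned
 * 2- and 4-byte accesses inside an 8-byte block, that the two schemes
 * described in the align.c comment agree:
 *   hardware PPC-LE swizzle: fetch the size-N value big-endian at EA ^ (8 - N)
 *   emulation:               fetch each byte at (EA + i) ^ 7, then byte-swap
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t mem[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

/* What the CPU would do in PPC little-endian mode for an aligned load. */
static uint32_t hw_swizzled_load(unsigned ea, unsigned size)
{
	unsigned base = ea ^ (8 - size);	/* 7, 6, 4 or 0 */
	uint32_t val = 0;

	for (unsigned i = 0; i < size; i++)	/* big-endian fetch */
		val = (val << 8) | mem[base + i];
	return val;
}

/* What fix_alignment()/SWIZ_PTR() emulate: per-byte xor with 7, then swap. */
static uint32_t emulated_load(unsigned ea, unsigned size)
{
	uint32_t be = 0, swapped = 0;

	for (unsigned i = 0; i < size; i++)
		be = (be << 8) | mem[(ea + i) ^ 7];
	for (unsigned i = 0; i < size; i++) {	/* byte-swap the result */
		swapped = (swapped << 8) | (be & 0xff);
		be >>= 8;
	}
	return swapped;
}

int main(void)
{
	for (unsigned size = 2; size <= 4; size *= 2)
		for (unsigned ea = 0; ea < 8; ea += size)
			assert(hw_swizzled_load(ea, size) ==
			       emulated_load(ea, size));
	printf("per-byte xor-with-7 + byteswap matches the hw swizzle\n");
	return 0;
}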
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8f85c5e8a55a..ff2940548929 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,9 +122,8 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-#ifdef CONFIG_PPC_64K_PAGES
- DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
-#endif
+ DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+ DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55ed7716636f..365381fcb27c 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -210,9 +210,11 @@ setup_745x_specifics:
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
*/
+BEGIN_FTR_SECTION
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
+END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index b61d86e7ceb6..271418308d53 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit)
isync
blr
-_GLOBAL(__setup_cpu_power4)
- blr
-
-_GLOBAL(__setup_cpu_be)
- /* Set large page sizes LP=0: 16MB, LP=1: 64KB */
- addi r3, 0, 0
- ori r3, r3, HID6_LB
- sldi r3, r3, 32
- nor r3, r3, r3
- mfspr r4, SPRN_HID6
- and r4, r4, r3
- addi r3, 0, 0x02000
- sldi r3, r3, 32
- or r4, r4, r3
- mtspr SPRN_HID6, r4
- blr
-
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3f7182db9ed5..1c114880dc05 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -30,11 +30,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
* part of the cputable though. That has to be fixed for both ppc32
* and ppc64
*/
-#ifdef CONFIG_PPC64
-extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
-#else
+#ifdef CONFIG_PPC32
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -58,7 +54,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE)
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
@@ -78,11 +75,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00400000,
.cpu_name = "POWER3 (630)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -92,11 +88,10 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00410000,
.cpu_name = "POWER3 (630+)",
.cpu_features = CPU_FTRS_POWER3,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "power3",
@@ -110,7 +105,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -124,7 +118,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -138,7 +131,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -152,7 +144,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.platform = "rs64",
@@ -166,7 +157,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -180,7 +170,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power4",
@@ -200,17 +189,11 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
-#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
{ /* PPC970FX */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003c0000,
.cpu_name = "PPC970FX",
-#ifdef CONFIG_PPC32
- .cpu_features = CPU_FTRS_970_32,
-#else
.cpu_features = CPU_FTRS_PPC970,
-#endif
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
@@ -221,8 +204,6 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
-#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
-#ifdef CONFIG_PPC64
{ /* PPC970MP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00440000,
@@ -232,6 +213,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
@@ -246,9 +228,13 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5",
.oprofile_type = PPC_OPROFILE_POWER4,
+ /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
+ * and above, but only work on POWER5 and above
+ */
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5",
},
{ /* Power5 GS */
@@ -260,9 +246,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.oprofile_cpu_type = "ppc64/power5+",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = MMCRA_SIHV,
+ .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* Power6 */
@@ -273,10 +260,13 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
+ .num_pmcs = 8,
.oprofile_cpu_type = "ppc64/power6",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
.platform = "power6",
},
{ /* Cell Broadband Engine */
@@ -289,7 +279,6 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
- .cpu_setup = __setup_cpu_be,
.platform = "ppc-cell-be",
},
{ /* default match */
@@ -301,7 +290,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_power4,
.platform = "power4",
}
#endif /* CONFIG_PPC64 */
@@ -323,7 +311,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00030000,
.cpu_name = "603",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -334,7 +322,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00060000,
.cpu_name = "603e",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -345,7 +333,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00070000,
.cpu_name = "603ev",
.cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -356,7 +344,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00040000,
.cpu_name = "604",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
@@ -368,7 +356,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604e",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -380,7 +368,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00090000,
.cpu_name = "604r",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -392,7 +380,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000a0000,
.cpu_name = "604ev",
.cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -404,7 +392,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00084202,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740_NOTAU,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -416,7 +404,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080100,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -428,7 +416,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082200,
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -440,7 +428,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00082210,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -452,7 +440,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083214,
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -464,7 +452,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00083000,
.cpu_name = "745/755",
.cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -476,7 +464,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000100,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX1,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -488,7 +476,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000200,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX2,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -500,7 +488,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70000000,
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -512,7 +500,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x70020000,
.cpu_name = "750GX",
.cpu_features = CPU_FTRS_750GX,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -524,7 +512,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00080000,
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740,
- .cpu_user_features = COMMON_USER,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -536,7 +524,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c1101,
.cpu_name = "7400 (1.1)",
.cpu_features = CPU_FTRS_7400_NOTAU,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -548,7 +537,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x000c0000,
.cpu_name = "7400",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -560,7 +550,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x800c0000,
.cpu_name = "7410",
.cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -572,7 +563,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000200,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -586,7 +578,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000201,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_21,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -600,7 +593,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80000000,
.cpu_name = "7450",
.cpu_features = CPU_FTRS_7450_23,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -614,7 +608,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010100,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_1,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -628,7 +623,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010200,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455_20,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -642,7 +638,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80010000,
.cpu_name = "7455",
.cpu_features = CPU_FTRS_7455,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -656,7 +653,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020100,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -670,7 +668,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020101,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -684,7 +683,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80020000,
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -698,7 +697,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80030000,
.cpu_name = "7447A",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -712,7 +712,8 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x80040000,
.cpu_name = "7448",
.cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features = COMMON_USER |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -721,6 +722,18 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_G4,
.platform = "ppc7450",
},
+ { /* 8641 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80040010,
+ .cpu_name = "8641",
+ .cpu_features = CPU_FTRS_7447A,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .cpu_setup = __setup_cpu_745x
+ },
+
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00810000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 778f22fd85d2..dbcb85994f46 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -22,6 +22,7 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/types.h>
#include <asm/processor.h>
@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void)
void default_machine_crash_shutdown(struct pt_regs *regs)
{
+ unsigned int irq;
+
/*
* This function is only called after the system
* has paniced or is otherwise in a critical state.
@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
local_irq_disable();
+ for_each_irq(irq) {
+ struct irq_desc *desc = irq_descp(irq);
+
+ if (desc->status & IRQ_INPROGRESS)
+ desc->handler->end(irq);
+
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->disable(irq);
+ }
+
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 764d07329716..371973be8d71 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -25,6 +25,11 @@
#define DBG(fmt...)
#endif
+void reserve_kdump_trampoline(void)
+{
+ lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+}
+
static void __init create_trampoline(unsigned long addr)
{
/* The maximum range of a single instruction branch, is the current
@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr)
create_branch(addr + 4, addr + PHYSICAL_START, 0);
}
-void __init kdump_setup(void)
+void __init setup_kdump_trampoline(void)
{
unsigned long i;
- DBG(" -> kdump_setup()\n");
+ DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
@@ -52,7 +57,7 @@ void __init kdump_setup(void)
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
- DBG(" <- kdump_setup()\n");
+ DBG(" <- setup_kdump_trampoline()\n");
}
#ifdef CONFIG_PROC_VMCORE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 19ad5c6b1818..221062c960c9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -57,6 +57,7 @@ system_call_common:
beq- 1f
ld r1,PACAKSAVE(r13)
1: std r10,0(r1)
+ crclr so
std r11,_NIP(r1)
std r12,_MSR(r1)
std r0,GPR0(r1)
@@ -75,7 +76,6 @@ system_call_common:
std r11,GPR11(r1)
std r11,GPR12(r1)
std r9,GPR13(r1)
- crclr so
mfcr r9
mflr r10
li r11,0xc01
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 340730fb8c91..01f71200c603 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -72,7 +72,7 @@ _GLOBAL(load_up_fpu)
std r12,_MSR(r1)
#endif
lfd fr0,THREAD_FPSCR(r5)
- mtfsf 0xff,fr0
+ MTFSF_L(fr0)
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
@@ -127,7 +127,7 @@ _GLOBAL(giveup_fpu)
_GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
- mtfsf 0xff,0
+ MTFSF_L(0)
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
@@ -136,7 +136,7 @@ _GLOBAL(cvt_fd)
_GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
- mtfsf 0xff,0
+ MTFSF_L(0)
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a0579e859b21..b25b25902d15 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -973,6 +973,13 @@ __secondary_start_gemini:
b __secondary_start
#endif /* CONFIG_GEMINI */
+ .globl __secondary_start_mpc86xx
+__secondary_start_mpc86xx:
+ mfspr r3, SPRN_PIR
+ stw r3, __secondary_hold_acknowledge@l(0)
+ mr r24, r3 /* cpu # */
+ b __secondary_start
+
.globl __secondary_start_pmac_0
__secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
@@ -1088,7 +1095,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
-
+BEGIN_FTR_SECTION
+ LOAD_BAT(4,r3,r4,r5)
+ LOAD_BAT(5,r3,r4,r5)
+ LOAD_BAT(6,r3,r4,r5)
+ LOAD_BAT(7,r3,r4,r5)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
blr
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b7d140430a41..831acbdf2592 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -316,6 +316,21 @@ label##_pSeries: \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+#define HSTD_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r20; /* save r20 */ \
+ mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
+ mtspr SPRN_SRR0,r20; \
+ mfspr r20,SPRN_HSRR1; /* copy HSRR1 to SRR1 */ \
+ mtspr SPRN_SRR1,r20; \
+ mfspr r20,SPRN_SPRG1; /* restore r20 */ \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \
label##_iSeries: \
@@ -544,8 +559,17 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
. = 0x3000
@@ -827,6 +851,11 @@ machine_check_common:
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
/*
* Here we have detected that the kernel stack pointer is bad.
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index fd8214caedee..a13a93dfc655 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- if (!_IO_IS_VALID(port))
- return NULL;
return (void __iomem *) (port+pci_io_base);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 4eba60a32890..7cb77c20fc5d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
* Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space.
*/
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
static int welcomed = 0;
+ struct page *page;
/* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4;
@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
- tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
- if (!tbl->it_map)
+ page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
+ tbl->it_map = page_address(page);
memset(tbl->it_map, 0, sz);
tbl->it_hint = 0;
@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
+ dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
+ struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
return NULL;
/* Alloc enough pages (and possibly more) */
- ret = (void *)__get_free_pages(flag, order);
- if (!ret)
+ page = alloc_pages_node(node, flag, order);
+ if (!page)
return NULL;
+ ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
@@ -570,9 +573,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
mask >> PAGE_SHIFT, order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
- ret = NULL;
- } else
- *dma_handle = mapping;
+ return NULL;
+ }
+ *dma_handle = mapping;
return ret;
}
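
iommu_init_table() above sizes its allocation as one bit per TCE entry, rounded up to bytes and then to a page order for alloc_pages_node(). A rough user-space illustration of that sizing arithmetic (PAGE_SIZE and get_order are re-implemented here, and the table size is an arbitrary example):

/*
 * Sketch of the bitmap sizing in iommu_init_table(): one bit per TCE
 * entry, rounded up to bytes, then to a page order for the allocator.
 * User-space illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)	/* simplified stand-in */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long it_size = 1UL << 19;	/* e.g. 512K TCE entries */
	unsigned long sz = (it_size + 7) >> 3;	/* bitmap size in bytes */

	printf("bitmap: %lu bytes -> order %d allocation\n", sz, get_order(sz));
	/* kernel then does alloc_pages_node(nid, GFP_ATOMIC, get_order(sz))
	 * and takes page_address() of the returned page */
	return 0;
}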
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d560c68897..40d4c14fde8f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -47,6 +47,7 @@
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
+#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -379,8 +380,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IRQSTACKS
-struct thread_info *softirq_ctx[NR_CPUS];
-struct thread_info *hardirq_ctx[NR_CPUS];
+struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
+struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
void irq_ctx_init(void)
{
@@ -436,6 +437,30 @@ void do_softirq(void)
}
EXPORT_SYMBOL(do_softirq);
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msi(struct pci_dev * pdev)
+{
+ if (ppc_md.enable_msi)
+ return ppc_md.enable_msi(pdev);
+ else
+ return -1;
+}
+
+void pci_disable_msi(struct pci_dev * pdev)
+{
+ if (ppc_md.disable_msi)
+ ppc_md.disable_msi(pdev);
+}
+
+void pci_scan_msi_device(struct pci_dev *dev) {}
+int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
+void pci_disable_msix(struct pci_dev *dev) {}
+void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
+void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
+void pci_no_msi(void) {}
+
+#endif
+
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 2cbde865d4f5..c02deaab26c7 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
current_weight = (resource >> 5 * 8) & 0xFF;
- pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
+ pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__FUNCTION__, current_entitled, current_weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %lu\n",
+ pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
__FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ee166c586642..a8fa04ef27cd 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
+#include <asm/lmb.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/prom.h>
@@ -335,7 +336,105 @@ static void __init export_htab_values(void)
of_node_put(node);
}
+static struct property crashk_base_prop = {
+ .name = "linux,crashkernel-base",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_res.start,
+};
+
+static unsigned long crashk_size;
+
+static struct property crashk_size_prop = {
+ .name = "linux,crashkernel-size",
+ .length = sizeof(unsigned long),
+ .value = (unsigned char *)&crashk_size,
+};
+
+static void __init export_crashk_values(void)
+{
+ struct device_node *node;
+ struct property *prop;
+
+ node = of_find_node_by_path("/chosen");
+ if (!node)
+ return;
+
+ /* There might be existing crash kernel properties, but we can't
+ * be sure what's in them, so remove them. */
+ prop = of_find_property(node, "linux,crashkernel-base", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ prop = of_find_property(node, "linux,crashkernel-size", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ if (crashk_res.start != 0) {
+ prom_add_property(node, &crashk_base_prop);
+ crashk_size = crashk_res.end - crashk_res.start + 1;
+ prom_add_property(node, &crashk_size_prop);
+ }
+
+ of_node_put(node);
+}
+
void __init kexec_setup(void)
{
export_htab_values();
+ export_crashk_values();
+}
+
+static int __init early_parse_crashk(char *p)
+{
+ unsigned long size;
+
+ if (!p)
+ return 1;
+
+ size = memparse(p, &p);
+
+ if (*p == '@')
+ crashk_res.start = memparse(p + 1, &p);
+ else
+ crashk_res.start = KDUMP_KERNELBASE;
+
+ crashk_res.end = crashk_res.start + size - 1;
+
+ return 0;
+}
+early_param("crashkernel", early_parse_crashk);
+
+void __init reserve_crashkernel(void)
+{
+ unsigned long size;
+
+ if (crashk_res.start == 0)
+ return;
+
+ /* We might have got these values via the command line or the
+ * device tree, either way sanitise them now. */
+
+ size = crashk_res.end - crashk_res.start + 1;
+
+ if (crashk_res.start != KDUMP_KERNELBASE)
+ printk("Crash kernel location must be 0x%x\n",
+ KDUMP_KERNELBASE);
+
+ crashk_res.start = KDUMP_KERNELBASE;
+ size = PAGE_ALIGN(size);
+ crashk_res.end = crashk_res.start + size - 1;
+
+ /* Crash kernel trumps memory limit */
+ if (memory_limit && memory_limit <= crashk_res.end) {
+ memory_limit = crashk_res.end + 1;
+ printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
+ memory_limit);
+ }
+
+ lmb_reserve(crashk_res.start, size);
+}
+
+int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+ return (start + size) > crashk_res.start && start <= crashk_res.end;
}
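
early_parse_crashk() above splits the crashkernel= argument as size[@base] using memparse(). The simplified user-space stand-in below is not the kernel code; it only mimics memparse()'s K/M/G suffix handling and skips the KDUMP_KERNELBASE fallback, to show how a typical argument is parsed:

/*
 * Simplified user-space illustration of how a "crashkernel=size[@base]"
 * string is split, mimicking memparse()'s K/M/G suffix handling.
 * Not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, const char **end)
{
	char *e;
	unsigned long long v = strtoull(s, &e, 0);

	switch (*e) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; e++;
	}
	*end = e;
	return v;
}

int main(void)
{
	const char *arg = "128M@32M";	/* e.g. crashkernel=128M@32M */
	const char *p;
	unsigned long long size, base = 0;

	size = parse_size(arg, &p);
	if (*p == '@')			/* optional explicit base address */
		base = parse_size(p + 1, &p);

	printf("reserve 0x%llx bytes at 0x%llx (end 0x%llx)\n",
	       size, base, base + size - 1);
	return 0;
}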
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index be982023409e..01d3916c4cb1 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu)
lwz r4,0(r4)
add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4)
- cmpi 0,r5,0
+ cmpwi 0,r5,0
add r5,r5,r3
beqlr
mtctr r5
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 2778cce058e2..e8883d42c43c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu)
sub r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
+ cmpdi 0,r4,0
add r4,r4,r5
+ beqlr
ld r4,0(r4)
add r4,r4,r5
mtctr r4
@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */
-_GLOBAL(__setup_cpu_power3)
- blr
-
_GLOBAL(execve)
li r0,__NR_execve
sc
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index ada50aa5b600..6960f090991e 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -204,7 +204,7 @@ static void nvram_print_partitions(char * label)
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each(p, &nvram_part->partition) {
tmp_part = list_entry(p, struct nvram_partition, partition);
- printk(KERN_WARNING "%d \t%02x\t%02x\t%d\t%s\n",
+ printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b129d2e4b759..b5431ccf1147 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1113,9 +1113,10 @@ check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
int i;
int rc = 0;
-#define push_end(res, size) do { unsigned long __sz = (size) ; \
- res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
- } while (0)
+#define push_end(res, mask) do { \
+ BUG_ON((mask+1) & mask); \
+ res->end = (res->end + mask) | mask; \
+} while (0)
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
@@ -1653,7 +1654,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
vma->vm_page_prot,
mmap_state, write_combine);
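
The rewritten push_end() earlier in the pci_32.c hunks rounds a resource end up so the region covers a whole (mask+1)-aligned window, and asserts that mask+1 is a power of two. A minimal stand-alone check of the bit trick (illustrative only, not the kernel macro):

/*
 * Illustration of the new push_end() arithmetic: for a power-of-two
 * window size (mask + 1), "(end + mask) | mask" bumps end to the last
 * byte of the next aligned window.  Sketch only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long push_end(unsigned long end, unsigned long mask)
{
	assert(((mask + 1) & mask) == 0);	/* mask+1 must be a power of two */
	return (end + mask) | mask;
}

int main(void)
{
	/* I/O windows are typically 4KB aligned: mask = 0xfff */
	printf("0x%lx\n", push_end(0x0fff, 0xfff));	/* 0x1fff */
	printf("0x%lx\n", push_end(0x1234, 0xfff));	/* 0x2fff */
	printf("0x%lx\n", push_end(0x2fff, 0xfff));	/* 0x3fff */
	return 0;
}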
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 4c4449be81ce..247937dd8b73 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,14 +42,6 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-/*
- * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
- * devices we don't have access to.
- */
-unsigned long io_page_mask;
-
-EXPORT_SYMBOL(io_page_mask);
-
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
@@ -235,8 +227,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
- if (dev)
+ if (dev) {
+ PHB_SET_NODE(phb, of_node_to_nid(dev));
add_linux_pci_domain(dev, phb);
+ }
return phb;
}
@@ -396,7 +390,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->current_state = 4; /* unknown power state */
- if (!strcmp(type, "pci")) {
+ if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
@@ -605,7 +599,7 @@ static int __init pcibios_init(void)
iSeries_pcibios_init();
#endif
- printk("PCI: Probing PCI hardware\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -630,14 +624,14 @@ static int __init pcibios_init(void)
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (ppc64_isabridge_dev != NULL)
- printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
+ printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
- printk("PCI: Probing PCI hardware done\n");
+ printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
@@ -804,7 +798,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
+ printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
@@ -883,7 +877,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
vma->vm_page_prot,
mmap_state, write_combine);
@@ -894,8 +887,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pci_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
struct device_node *np;
@@ -907,13 +900,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC_MULTIPLATFORM
device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
}
#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -1104,8 +1094,6 @@ void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
hose->io_base_virt);
of_node_put(isa_dn);
- /* Allow all IO */
- io_page_mask = -1;
}
}
@@ -1212,7 +1200,7 @@ int remap_bus_range(struct pci_bus *bus)
return 1;
if (start_phys == 0)
return 1;
- printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
+ printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
if (__ioremap_explicit(start_phys, start_virt, size,
_PAGE_NO_CACHE | _PAGE_GUARDED))
return 1;
@@ -1232,27 +1220,13 @@ static void phbs_remap_io(void)
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long start, end, mask, offset;
+ unsigned long offset;
if (res->flags & IORESOURCE_IO) {
offset = (unsigned long)hose->io_base_virt - pci_io_base;
- start = res->start += offset;
- end = res->end += offset;
-
- /* Need to allow IO access to pages that are in the
- ISA range */
- if (start < MAX_ISA_PORT) {
- if (end > MAX_ISA_PORT)
- end = MAX_ISA_PORT;
-
- start >>= PAGE_SHIFT;
- end >>= PAGE_SHIFT;
-
- /* get the range of pages for the map */
- mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
- io_page_mask |= mask;
- }
+ res->start += offset;
+ res->end += offset;
} else if (res->flags & IORESOURCE_MEM) {
res->start += hose->pci_mem_offset;
res->end += hose->pci_mem_offset;
@@ -1442,3 +1416,12 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
return -EOPNOTSUPP;
}
+
+#ifdef CONFIG_NUMA
+int pcibus_to_node(struct pci_bus *bus)
+{
+ struct pci_controller *phb = pci_bus_to_host(bus);
+ return phb->node;
+}
+EXPORT_SYMBOL(pcibus_to_node);
+#endif
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
index e1a32f802c0b..72ce082ce738 100644
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ b/arch/powerpc/kernel/pci_direct_iommu.c
@@ -82,13 +82,17 @@ static int pci_direct_dma_supported(struct device *dev, u64 mask)
return mask < 0x100000000ull;
}
+static struct dma_mapping_ops pci_direct_ops = {
+ .alloc_coherent = pci_direct_alloc_coherent,
+ .free_coherent = pci_direct_free_coherent,
+ .map_single = pci_direct_map_single,
+ .unmap_single = pci_direct_unmap_single,
+ .map_sg = pci_direct_map_sg,
+ .unmap_sg = pci_direct_unmap_sg,
+ .dma_supported = pci_direct_dma_supported,
+};
+
void __init pci_direct_iommu_init(void)
{
- pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
- pci_dma_ops.free_coherent = pci_direct_free_coherent;
- pci_dma_ops.map_single = pci_direct_map_single;
- pci_dma_ops.unmap_single = pci_direct_unmap_single;
- pci_dma_ops.map_sg = pci_direct_map_sg;
- pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
- pci_dma_ops.dma_supported = pci_direct_dma_supported;
+ pci_dma_ops = pci_direct_ops;
}
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 12c4c9e9bbc7..1c18953514c3 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
+#include <asm/firmware.h>
/*
* Traverse_func that inits the PCI fields of the device node.
@@ -59,6 +60,11 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL);
+ if (busp)
+ pdn->bussubno = *busp;
+ }
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
index c1d95e14bbed..0688b2534acb 100644
--- a/arch/powerpc/kernel/pci_iommu.c
+++ b/arch/powerpc/kernel/pci_iommu.c
@@ -44,16 +44,16 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-static inline struct iommu_table *devnode_table(struct device *dev)
+static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
- if (!dev) {
+ if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
- pdev = to_pci_dev(dev);
+ pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
@@ -85,14 +85,15 @@ static inline unsigned long device_to_mask(struct device *hwdev)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
- device_to_mask(hwdev), flag);
+ return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
+ device_to_mask(hwdev), flag,
+ pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
+ iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -104,7 +105,7 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
- return iommu_map_single(devnode_table(hwdev), vaddr, size,
+ return iommu_map_single(device_to_table(hwdev), vaddr, size,
device_to_mask(hwdev), direction);
}
@@ -112,27 +113,27 @@ static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
- iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
+ iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(pdev, devnode_table(pdev), sglist,
+ return iommu_map_sg(pdev, device_to_table(pdev), sglist,
nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
+ iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct iommu_table *tbl = devnode_table(dev);
+ struct iommu_table *tbl = device_to_table(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
@@ -147,13 +148,17 @@ static int pci_iommu_dma_supported(struct device *dev, u64 mask)
return 1;
}
+struct dma_mapping_ops pci_iommu_ops = {
+ .alloc_coherent = pci_iommu_alloc_coherent,
+ .free_coherent = pci_iommu_free_coherent,
+ .map_single = pci_iommu_map_single,
+ .unmap_single = pci_iommu_unmap_single,
+ .map_sg = pci_iommu_map_sg,
+ .unmap_sg = pci_iommu_unmap_sg,
+ .dma_supported = pci_iommu_dma_supported,
+};
+
void pci_iommu_init(void)
{
- pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
- pci_dma_ops.free_coherent = pci_iommu_free_coherent;
- pci_dma_ops.map_single = pci_iommu_map_single;
- pci_dma_ops.unmap_single = pci_iommu_unmap_single;
- pci_dma_ops.map_sg = pci_iommu_map_sg;
- pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
- pci_dma_ops.dma_supported = pci_iommu_dma_supported;
+ pci_dma_ops = pci_iommu_ops;
}
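[Editor's sketch] The pci_iommu.c change above replaces seven field-by-field assignments with a single statically initialized ops structure that is copied into pci_dma_ops in one statement. A minimal standalone illustration of that ops-table pattern, with purely illustrative names (not the kernel's types):

/* Sketch of the ops-table pattern: one statically initialized structure of
 * function pointers replaces field-by-field assignment.  All names here are
 * illustrative, not the kernel's. */
#include <stdio.h>
#include <stddef.h>

struct demo_dma_ops {
	void *(*alloc_coherent)(size_t size);
	void  (*free_coherent)(void *vaddr);
};

static void *demo_alloc(size_t size) { printf("alloc %zu\n", size); return NULL; }
static void demo_free(void *vaddr)   { printf("free %p\n", vaddr); }

static const struct demo_dma_ops demo_iommu_ops = {
	.alloc_coherent = demo_alloc,
	.free_coherent  = demo_free,
};

static struct demo_dma_ops active_dma_ops;

int main(void)
{
	active_dma_ops = demo_iommu_ops;	/* one assignment instead of N */
	active_dma_ops.alloc_coherent(4096);
	return 0;
}
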
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 3c2cf661f6d9..2ab8f2be911e 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -52,7 +52,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
- if (!machine_is(pseries) && !machine_is(cell))
+ if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
@@ -115,8 +115,6 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
- vma->vm_flags |= VM_SHM | VM_LOCKED;
-
if ((vma->vm_end - vma->vm_start) > dp->size)
return -EINVAL;
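[Editor's sketch] The proc_ppc64.c hunk above switches from a platform check to asking the device tree directly whether an /rtas node exists. A userspace analogue of that test, assuming the usual /proc/device-tree mount point on powerpc:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Presence of the node, not the machine type, decides RTAS support. */
	if (access("/proc/device-tree/rtas", F_OK) == 0)
		printf("RTAS node present\n");
	else
		printf("no RTAS node\n");
	return 0;
}
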
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2dd47d2dd998..e4732459c485 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -708,6 +708,61 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
return put_user(val, (unsigned int __user *) adr);
}
+int set_endian(struct task_struct *tsk, unsigned int val)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
+ (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (val == PR_ENDIAN_BIG)
+ regs->msr &= ~MSR_LE;
+ else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
+ regs->msr |= MSR_LE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int get_endian(struct task_struct *tsk, unsigned long adr)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned int val;
+
+ if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
+ !cpu_has_feature(CPU_FTR_REAL_LE))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (regs->msr & MSR_LE) {
+ if (cpu_has_feature(CPU_FTR_REAL_LE))
+ val = PR_ENDIAN_LITTLE;
+ else
+ val = PR_ENDIAN_PPC_LITTLE;
+ } else
+ val = PR_ENDIAN_BIG;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
+}
+
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
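[Editor's sketch] The new set_endian/get_endian and set_unalign_ctl/get_unalign_ctl helpers above are the arch backends reached from userspace through prctl(2). A hedged usage example; it assumes kernel headers that define the PR_*_ENDIAN and PR_*_UNALIGN constants and only does something meaningful on a powerpc kernel with this support:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	unsigned int endian = 0;

	/* Ask for SIGBUS instead of in-kernel fixups on unaligned accesses. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0)
		perror("PR_SET_UNALIGN");

	/* Query the current endian mode of this thread. */
	if (prctl(PR_GET_ENDIAN, (unsigned long)&endian, 0, 0, 0) == 0)
		printf("endian mode: %u\n", endian);
	else
		perror("PR_GET_ENDIAN (older kernel?)");
	return 0;
}
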
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9a07f97f0712..483455c5bb02 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -50,6 +50,7 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
+#include <asm/kexec.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -836,6 +837,42 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
+static int __init early_parse_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
+ DBG("memory limit = 0x%lx\n", memory_limit);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+/*
+ * The device tree may be allocated below our memory limit, or inside the
+ * crash kernel region for kdump. If so, move it out now.
+ */
+static void move_device_tree(void)
+{
+ unsigned long start, size;
+ void *p;
+
+ DBG("-> move_device_tree\n");
+
+ start = __pa(initial_boot_params);
+ size = initial_boot_params->totalsize;
+
+ if ((memory_limit && (start + size) > memory_limit) ||
+ overlaps_crashkernel(start, size)) {
+ p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+ memcpy(p, initial_boot_params, size);
+ initial_boot_params = (struct boot_param_header *)p;
+ DBG("Moved device tree to 0x%p\n", p);
+ }
+
+ DBG("<- move_device_tree\n");
+}
/**
* unflattens the device-tree passed by the firmware, creating the
@@ -911,7 +948,10 @@ static struct ibm_pa_feature {
{CPU_FTR_CTRL, 0, 0, 3, 0},
{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
+#if 0
+ /* put this back once we know how to test if firmware does 64k IO */
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+#endif
};
static void __init check_cpu_pa_features(unsigned long node)
@@ -1070,6 +1110,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
iommu_force_on = 1;
#endif
+ /* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
@@ -1123,17 +1164,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
DBG("Command line is: %s\n", cmd_line);
- if (strstr(cmd_line, "mem=")) {
- char *p, *q;
-
- for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
- q = p + 4;
- if (p > cmd_line && p[-1] != ' ')
- continue;
- memory_limit = memparse(q, &q);
- }
- }
-
/* break now */
return 1;
}
@@ -1237,9 +1267,17 @@ static void __init early_reserve_mem(void)
{
u64 base, size;
u64 *reserve_map;
+ unsigned long self_base;
+ unsigned long self_size;
reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
+
+ /* before we do anything, lets reserve the dt blob */
+ self_base = __pa((unsigned long)initial_boot_params);
+ self_size = initial_boot_params->totalsize;
+ lmb_reserve(self_base, self_size);
+
#ifdef CONFIG_PPC32
/*
* Handle the case where we might be booting from an old kexec
@@ -1254,6 +1292,9 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
+ /* skip if the reservation is for the blob */
+ if (base_32 == self_base && size_32 == self_size)
+ continue;
DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32);
}
@@ -1265,6 +1306,9 @@ static void __init early_reserve_mem(void)
size = *(reserve_map++);
if (size == 0)
break;
+ /* skip if the reservation is for the blob */
+ if (base == self_base && size == self_size)
+ continue;
DBG("reserving: %llx -> %llx\n", base, size);
lmb_reserve(base, size);
}
@@ -1292,18 +1336,26 @@ void __init early_init_devtree(void *params)
lmb_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
- lmb_enforce_memory_limit(memory_limit);
- lmb_analyze();
- DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+ parse_early_param();
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
-#ifdef CONFIG_CRASH_DUMP
- lmb_reserve(0, KDUMP_RESERVE_LIMIT);
-#endif
+ reserve_kdump_trampoline();
+ reserve_crashkernel();
early_reserve_mem();
+ lmb_enforce_memory_limit(memory_limit);
+ lmb_analyze();
+
+ DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+
+ /* We may need to relocate the flat tree, do it now.
+ * FIXME .. and the initrd too? */
+ move_device_tree();
+
DBG("Scanning CPUs ...\n");
/* Retreive CPU related informations from the flat tree
@@ -2053,29 +2105,46 @@ int prom_update_property(struct device_node *np,
return 0;
}
-#ifdef CONFIG_KEXEC
-/* We may have allocated the flat device tree inside the crash kernel region
- * in prom_init. If so we need to move it out into regular memory. */
-void kdump_move_device_tree(void)
-{
- unsigned long start, end;
- struct boot_param_header *new;
-
- start = __pa((unsigned long)initial_boot_params);
- end = start + initial_boot_params->totalsize;
-
- if (end < crashk_res.start || start > crashk_res.end)
- return;
- new = (struct boot_param_header*)
- __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
-
- memcpy(new, initial_boot_params, initial_boot_params->totalsize);
+/* Find the device node for a given logical cpu number, also returns the cpu
+ * local thread number (index in ibm,interrupt-server#s) if relevant and
+ * asked for (non NULL)
+ */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+{
+ int hardid;
+ struct device_node *np;
- initial_boot_params = new;
+ hardid = get_hard_smp_processor_id(cpu);
- DBG("Flat device tree blob moved to %p\n", initial_boot_params);
+ for_each_node_by_type(np, "cpu") {
+ u32 *intserv;
+ unsigned int plen, t;
- /* XXX should we unreserve the old DT? */
+ /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
+ * fallback to "reg" property and assume no threads
+ */
+ intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
+ &plen);
+ if (intserv == NULL) {
+ u32 *reg = (u32 *)get_property(np, "reg", NULL);
+ if (reg == NULL)
+ continue;
+ if (*reg == hardid) {
+ if (thread)
+ *thread = 0;
+ return np;
+ }
+ } else {
+ plen /= sizeof(u32);
+ for (t = 0; t < plen; t++) {
+ if (hardid == intserv[t]) {
+ if (thread)
+ *thread = t;
+ return np;
+ }
+ }
+ }
+ }
+ return NULL;
}
-#endif /* CONFIG_KEXEC */
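[Editor's sketch] The prom.c changes above move "mem=" handling to an early_param hook that runs the argument through memparse() and PAGE_ALIGN(). A standalone restatement of that parsing step, using local stand-ins for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL

/* memparse()-like: a number with an optional K/M/G suffix. */
static unsigned long long demo_memparse(const char *s)
{
	char *end;
	unsigned long long val = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; break;
	}
	return val;
}

static unsigned long long demo_page_align(unsigned long long v)
{
	return (v + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1);
}

int main(void)
{
	printf("mem=512M -> limit 0x%llx\n",
	       demo_page_align(demo_memparse("512M")));
	return 0;
}
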
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41e9ab40cd54..8c28eb0cbdac 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -194,19 +194,12 @@ static int __initdata of_platform;
static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
-static unsigned long __initdata prom_memory_limit;
-
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
-#ifdef CONFIG_KEXEC
-static unsigned long __initdata prom_crashk_base;
-static unsigned long __initdata prom_crashk_size;
-#endif
-
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
@@ -574,7 +567,7 @@ static void __init early_cmdline_parse(void)
if ((long)_prom->chosen > 0)
l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
- if (l == 0) /* dbl check */
+ if (l <= 0 || p[0] == '\0') /* dbl check */
strlcpy(RELOC(prom_cmd_line),
RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
@@ -593,45 +586,6 @@ static void __init early_cmdline_parse(void)
RELOC(iommu_force_on) = 1;
}
#endif
-
- opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
- if (opt) {
- opt += 4;
- RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
-#ifdef CONFIG_PPC64
- /* Align to 16 MB == size of ppc64 large page */
- RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
-#endif
- }
-
-#ifdef CONFIG_KEXEC
- /*
- * crashkernel=size@addr specifies the location to reserve for
- * crash kernel.
- */
- opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
- if (opt) {
- opt += 12;
- RELOC(prom_crashk_size) =
- prom_memparse(opt, (const char **)&opt);
-
- if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
- RELOC(prom_crashk_size)) {
- prom_printf("Warning: crashkernel size is not "
- "aligned to 16MB\n");
- }
-
- /*
- * At present, the crash kernel always run at 32MB.
- * Just ignore whatever user passed.
- */
- RELOC(prom_crashk_base) = 0x2000000;
- if (*opt == '@') {
- prom_printf("Warning: PPC64 kdump kernel always runs "
- "at 32 MB\n");
- }
- }
-#endif
}
#ifdef CONFIG_PPC_PSERIES
@@ -822,6 +776,7 @@ static void __init prom_send_capabilities(void)
/* try calling the ibm,client-architecture-support method */
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
+ root,
ADDR(ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)
@@ -1115,29 +1070,6 @@ static void __init prom_init_mem(void)
}
/*
- * If prom_memory_limit is set we reduce the upper limits *except* for
- * alloc_top_high. This must be the real top of RAM so we can put
- * TCE's up there.
- */
-
- RELOC(alloc_top_high) = RELOC(ram_top);
-
- if (RELOC(prom_memory_limit)) {
- if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
- prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
- prom_printf("Ignoring mem=%x >= ram_top.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else {
- RELOC(ram_top) = RELOC(prom_memory_limit);
- RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
- }
- }
-
- /*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
* Some RS64 machines have buggy firmware where claims up at
@@ -1149,20 +1081,14 @@ static void __init prom_init_mem(void)
RELOC(rmo_top) = RELOC(ram_top);
RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
RELOC(alloc_top) = RELOC(rmo_top);
+ RELOC(alloc_top_high) = RELOC(ram_top);
prom_printf("memory layout at init:\n");
- prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
- prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
- }
-#endif
}
@@ -1348,16 +1274,10 @@ static void __init prom_initialize_tce_table(void)
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
- if (RELOC(prom_memory_limit)) {
- /*
- * We align the start to a 16MB boundary so we can map
- * the TCE area using large pages if possible.
- * The end should be the top of RAM so no need to align it.
- */
- RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
- 0x1000000);
- RELOC(prom_tce_alloc_end) = local_alloc_top;
- }
+ /* These are only really needed if there is a memory limit in
+ * effect, but we don't know so export them always. */
+ RELOC(prom_tce_alloc_start) = local_alloc_bottom;
+ RELOC(prom_tce_alloc_end) = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
@@ -1622,6 +1542,15 @@ static int __init prom_find_machine_type(void)
if (strstr(p, RELOC("Power Macintosh")) ||
strstr(p, RELOC("MacRISC")))
return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+ /* We must make sure we don't detect the IBM Cell
+ * blades as pSeries due to some firmware issues,
+ * so we do it here.
+ */
+ if (strstr(p, RELOC("IBM,CBEA")) ||
+ strstr(p, RELOC("IBM,CPBW-1.0")))
+ return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
i += sl + 1;
}
}
@@ -2031,11 +1960,7 @@ static void __init flatten_device_tree(void)
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
- /* Reserve the whole thing and copy the reserve map in, we
- * also bump mem_reserve_cnt to cause further reservations to
- * fail since it's too late.
- */
- reserve_mem(RELOC(dt_header_start), hdr->totalsize);
+ /* Copy the reserve map in */
memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
@@ -2048,6 +1973,9 @@ static void __init flatten_device_tree(void)
RELOC(mem_reserve_map)[i].size);
}
#endif
+ /* Bump mem_reserve_cnt to cause further reservations to fail
+ * since it's too late.
+ */
RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
@@ -2270,10 +2198,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base))
- reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
-#endif
/*
* Determine which cpu is actually running right _now_
*/
@@ -2307,10 +2231,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* Fill in some infos for use by the kernel later on
*/
- if (RELOC(prom_memory_limit))
- prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
- &RELOC(prom_memory_limit),
- sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
@@ -2330,16 +2250,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
-#ifdef CONFIG_KEXEC
- if (RELOC(prom_crashk_base)) {
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
- PTRRELOC(&prom_crashk_base),
- sizeof(RELOC(prom_crashk_base)));
- prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
- PTRRELOC(&prom_crashk_size),
- sizeof(RELOC(prom_crashk_size)));
- }
-#endif
/*
* Fixup any known bugs in the device-tree
*/
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 3934c227549b..45df420383cc 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -548,3 +548,28 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
return __of_address_to_resource(dev, addrp, size, flags, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size)
+{
+ u32 *dma_window, cells;
+ unsigned char *prop;
+
+ dma_window = (u32 *)dma_window_prop;
+
+ /* busno is always one cell */
+ *busno = *(dma_window++);
+
+ prop = get_property(dn, "ibm,#dma-address-cells", NULL);
+ if (!prop)
+ prop = get_property(dn, "#address-cells", NULL);
+
+ cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
+ *phys = of_read_addr(dma_window, cells);
+
+ dma_window += cells;
+
+ prop = get_property(dn, "ibm,#dma-size-cells", NULL);
+ cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
+ *size = of_read_addr(dma_window, cells);
+}
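[Editor's sketch] The dma-window property decoded by of_parse_dma_window() above is a flat array of 32-bit cells: one bus-number cell, then #address-cells worth of address, then #size-cells worth of size. A standalone sketch of that cell walk (host-endian values for simplicity; the real property is big-endian):

#include <stdio.h>
#include <stdint.h>

static uint64_t read_cells(const uint32_t **p, int cells)
{
	uint64_t v = 0;

	while (cells--)
		v = (v << 32) | *(*p)++;
	return v;
}

int main(void)
{
	/* busno=1, 2 address cells, 2 size cells (example values) */
	const uint32_t prop[] = { 1, 0x0, 0x80000000, 0x0, 0x10000000 };
	const uint32_t *p = prop;
	uint64_t busno = read_cells(&p, 1);
	uint64_t phys  = read_cells(&p, 2);
	uint64_t size  = read_cells(&p, 2);

	printf("busno=%llu phys=0x%llx size=0x%llx\n",
	       (unsigned long long)busno, (unsigned long long)phys,
	       (unsigned long long)size);
	return 0;
}
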
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 4a677d1bd4ef..5563e2e7d89c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -404,7 +404,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_detach(child, data);
break;
-#ifdef CONFIG_PPC64
case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
int i;
unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
@@ -468,7 +467,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
break;
}
-#endif /* CONFIG_PPC64 */
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 34d073fb6091..77578c093dda 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -14,19 +14,20 @@
unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
- wait_time = rtas_extended_busy_delay_time(error);
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
/* This is boot time so we spin. */
udelay(wait_time*1000);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -44,24 +45,25 @@ unsigned long __init rtas_get_boot_time(void)
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
- int error, wait_time;
+ int error;
+ unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt() && printk_ratelimit()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
}
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
@@ -88,14 +90,14 @@ int rtas_set_rtc_time(struct rtc_time *tm)
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
- if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
+
+ wait_time = rtas_busy_delay_time(error);
+ if (wait_time) {
if (in_interrupt())
return 1; /* probably decrementer */
- wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
- error = RTAS_CLOCK_BUSY;
}
- } while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
+ } while (wait_time && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0112318213ab..17dc79198515 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -370,24 +370,36 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
return ret;
}
-/* Given an RTAS status code of 990n compute the hinted delay of 10^n
- * (last digit) milliseconds. For now we bound at n=5 (100 sec).
+/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
+ * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
*/
-unsigned int rtas_extended_busy_delay_time(int status)
+unsigned int rtas_busy_delay_time(int status)
{
- int order = status - 9900;
- unsigned long ms;
+ int order;
+ unsigned int ms = 0;
+
+ if (status == RTAS_BUSY) {
+ ms = 1;
+ } else if (status >= 9900 && status <= 9905) {
+ order = status - 9900;
+ for (ms = 1; order > 0; order--)
+ ms *= 10;
+ }
- if (order < 0)
- order = 0; /* RTC depends on this for -2 clock busy */
- else if (order > 5)
- order = 5; /* bound */
+ return ms;
+}
- /* Use microseconds for reasonable accuracy */
- for (ms = 1; order > 0; order--)
- ms *= 10;
+/* For an RTAS busy status code, perform the hinted delay. */
+unsigned int rtas_busy_delay(int status)
+{
+ unsigned int ms;
- return ms;
+ might_sleep();
+ ms = rtas_busy_delay_time(status);
+ if (ms)
+ msleep(ms);
+
+ return ms;
}
int rtas_error_rc(int rtas_rc)
@@ -438,22 +450,14 @@ int rtas_get_power_level(int powerdomain, int *level)
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
int token = rtas_token("set-power-level");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -463,22 +467,14 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
int rtas_get_sensor(int sensor, int index, int *state)
{
int token = rtas_token("get-sensor-state");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 2, 2, state, sensor, index);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -488,23 +484,14 @@ int rtas_get_sensor(int sensor, int index, int *state)
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
- unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
- while (1) {
+ do {
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
- if (rc == RTAS_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- }
- else
- break;
- }
+ } while (rtas_busy_delay(rc));
if (rc < 0)
return rtas_error_rc(rc);
@@ -555,13 +542,11 @@ void rtas_os_term(char *str)
do {
status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
__pa(rtas_os_term_buf));
+ } while (rtas_busy_delay(status));
- if (status == RTAS_BUSY)
- udelay(1);
- else if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n",
+ if (status != 0)
+ printk(KERN_EMERG "ibm,os-term call failed %d\n",
status);
- } while (status == RTAS_BUSY);
}
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
@@ -608,9 +593,31 @@ out:
static int rtas_ibm_suspend_me(struct rtas_args *args)
{
int i;
+ long state;
+ long rc;
+ unsigned long dummy;
struct rtas_suspend_me_data data;
+ /* Make sure the state is valid */
+ rc = plpar_hcall(H_VASI_STATE,
+ ((u64)args->args[0] << 32) | args->args[1],
+ 0, 0, 0,
+ &state, &dummy, &dummy);
+
+ if (rc) {
+ printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
+ return rc;
+ } else if (state == H_VASI_ENABLED) {
+ args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
+ return 0;
+ } else if (state != H_VASI_SUSPENDING) {
+ printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
+ state);
+ args->args[args->nargs] = -1;
+ return 0;
+ }
+
data.waiting = 1;
data.args = args;
@@ -789,7 +796,8 @@ EXPORT_SYMBOL(rtas_token);
EXPORT_SYMBOL(rtas_call);
EXPORT_SYMBOL(rtas_data_buf);
EXPORT_SYMBOL(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_extended_busy_delay_time);
+EXPORT_SYMBOL(rtas_busy_delay_time);
+EXPORT_SYMBOL(rtas_busy_delay);
EXPORT_SYMBOL(rtas_get_sensor);
EXPORT_SYMBOL(rtas_get_power_level);
EXPORT_SYMBOL(rtas_set_power_level);
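[Editor's sketch] The rtas.c conversion above folds the busy handling into one small pure function: a status of RTAS_BUSY (-2) means "wait 1 ms", an extended status 990n means "wait 10^n ms", anything else means "no delay". A standalone restatement of that mapping:

#include <stdio.h>

#define DEMO_RTAS_BUSY (-2)

static unsigned int demo_busy_delay_ms(int status)
{
	unsigned int ms = 0;

	if (status == DEMO_RTAS_BUSY) {
		ms = 1;
	} else if (status >= 9900 && status <= 9905) {
		int order = status - 9900;

		for (ms = 1; order > 0; order--)
			ms *= 10;
	}
	return ms;
}

int main(void)
{
	int samples[] = { 0, -2, 9900, 9903, 9905 };

	for (int i = 0; i < 5; i++)
		printf("status %5d -> %u ms\n", samples[i],
		       demo_busy_delay_ms(samples[i]));
	return 0;
}

Callers then collapse their hand-rolled retry loops into "do { rc = rtas_call(...); } while (rtas_busy_delay(rc));", as the hunks above and in rtas_flash.c show.
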
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index aaf384c3f04a..1442b63a75da 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -365,20 +365,12 @@ static int rtas_excl_release(struct inode *inode, struct file *file)
static void manage_flash(struct rtas_manage_flash_t *args_buf)
{
- unsigned int wait_time;
s32 rc;
- while (1) {
+ do {
rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
1, NULL, args_buf->op);
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
}
@@ -451,27 +443,18 @@ static ssize_t manage_flash_write(struct file *file, const char __user *buf,
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_token("ibm,validate-flash-image");
- unsigned int wait_time;
int update_results;
s32 rc;
rc = 0;
- while(1) {
+ do {
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
rc = rtas_call(token, 2, 2, &update_results,
(u32) __pa(rtas_data_buf), args_buf->buf_size);
memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
-
- if (rc == RTAS_RC_BUSY)
- udelay(1);
- else if (rtas_is_extended_busy(rc)) {
- wait_time = rtas_extended_busy_delay_time(rc);
- udelay(wait_time * 1000);
- } else
- break;
- }
+ } while (rtas_busy_delay(rc));
args_buf->status = rc;
args_buf->update_results = update_results;
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 57b539a03fa9..6eb7e49b394a 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -313,7 +313,9 @@ unsigned long __init find_and_init_phbs(void)
for (node = of_get_next_child(root, NULL);
node != NULL;
node = of_get_next_child(root, node)) {
- if (node->type == NULL || strcmp(node->type, "pci") != 0)
+
+ if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ strcmp(node->type, "pciex") != 0))
continue;
phb = pcibios_alloc_controller(node);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 684ab1d49c65..bd328123af75 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -443,6 +443,7 @@ void __init smp_setup_cpu_maps(void)
}
#endif /* CONFIG_SMP */
+int __initdata do_early_xmon;
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
@@ -456,7 +457,7 @@ static int __init early_xmon(char *p)
return 0;
}
xmon_init(1);
- debugger(NULL);
+ do_early_xmon = 1;
return 0;
}
@@ -524,3 +525,20 @@ int check_legacy_ioport(unsigned long base_port)
return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);
+
+static int ppc_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ppc_md.panic(ptr); /* May not return */
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_event,
+ .priority = INT_MIN /* may not return; must be done last */
+};
+
+void __init setup_panic(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+}
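[Editor's sketch] setup_panic() above registers ppc_md.panic on the generic panic notifier chain with the lowest possible priority so it runs last. A tiny userspace sketch of the same priority-ordered callback idea (fixed-size and pre-sorted, unlike the kernel's linked chain):

#include <stdio.h>
#include <limits.h>

struct demo_notifier {
	int (*call)(void *data);
	int priority;		/* higher runs earlier */
};

static int log_event(void *data)  { printf("log: %s\n", (char *)data); return 0; }
static int final_halt(void *data) { printf("halt: %s\n", (char *)data); return 0; }

int main(void)
{
	/* kept sorted by priority, descending; INT_MIN runs last */
	struct demo_notifier chain[] = {
		{ log_event, 0 },
		{ final_halt, INT_MIN },
	};

	for (unsigned i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		chain[i].call("kernel panic");
	return 0;
}
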
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2ebba755272e..4c67ad7fae08 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -2,5 +2,8 @@
#define _POWERPC_KERNEL_SETUP_H
void check_for_initrd(void);
+void do_init_bootmem(void);
+void setup_panic(void);
+extern int do_early_xmon;
#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 69ac25701344..e5a44812441a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -131,12 +131,6 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
- /* Check default command line */
-#ifdef CONFIG_CMDLINE
- if (cmd_line[0] == 0)
- strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
-#endif /* CONFIG_CMDLINE */
-
probe_machine();
#ifdef CONFIG_6xx
@@ -235,7 +229,7 @@ arch_initcall(ppc_init);
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
+ *cmdline_p = cmd_line;
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
@@ -285,16 +279,16 @@ void __init setup_arch(char **cmdline_p)
/* reboot on panic */
panic_timeout = 180;
+ if (ppc_md.panic)
+ setup_panic();
+
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
- *cmdline_p = cmd_line;
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
/* set up the bootmem stuff with available memory */
do_init_bootmem();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4467c49903b6..78f3a5fd43f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -100,12 +100,6 @@ unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */
-static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
-static struct notifier_block ppc64_panic_block = {
- .notifier_call = ppc64_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
#ifdef CONFIG_SMP
static int smt_enabled_cmdline;
@@ -199,9 +193,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Probe the machine type */
probe_machine();
-#ifdef CONFIG_CRASH_DUMP
- kdump_setup();
-#endif
+ setup_kdump_trampoline();
DBG("Found, Initializing memory management...\n");
@@ -353,9 +345,6 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
-#ifdef CONFIG_KEXEC
- kdump_move_device_tree();
-#endif
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
@@ -420,10 +409,8 @@ void __init setup_system(void)
*/
register_early_udbg_console();
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
-
- parse_early_param();
+ if (do_early_xmon)
+ debugger(NULL);
check_smt_enabled();
smp_setup_cpu_maps();
@@ -456,13 +443,6 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
-static int ppc64_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- ppc_md.panic((char *)ptr); /* May not return */
- return NOTIFY_DONE;
-}
-
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
@@ -517,8 +497,6 @@ static void __init emergency_stack_init(void)
*/
void __init setup_arch(char **cmdline_p)
{
- extern void do_init_bootmem(void);
-
ppc64_boot_msg(0x12, "Setup Arch");
*cmdline_p = cmd_line;
@@ -535,8 +513,7 @@ void __init setup_arch(char **cmdline_p)
panic_timeout = 180;
if (ppc_md.panic)
- atomic_notifier_chain_register(&panic_notifier_list,
- &ppc64_panic_block);
+ setup_panic();
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb550..d73b25e22fca 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -419,9 +419,7 @@ static long restore_user_regs(struct pt_regs *regs,
{
long err;
unsigned int save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
unsigned long msr;
-#endif
/*
* restore general registers but not including MSR or SOFTE. Also
@@ -430,11 +428,16 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
+ err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
if (err)
return 1;
+ /* if doing signal return, restore the previous little-endian mode */
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
/*
* Do this before updating the thread state in
* current->thread.fpr/vr/evr. That way, if we get preempted
@@ -455,7 +458,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the altivec registers from
current->thread when it next does altivec instructions */
regs->msr &= ~MSR_VEC;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
+ if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
@@ -472,7 +475,7 @@ static long restore_user_regs(struct pt_regs *regs,
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
regs->msr &= ~MSR_SPE;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
+ if (msr & MSR_SPE) {
/* restore spe registers from the stack */
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32)))
@@ -757,10 +760,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
- if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, frame, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
if (save_user_regs(regs, frame, __NR_rt_sigreturn))
goto badframe;
@@ -777,6 +780,8 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[5] = (unsigned long) &rt_sf->uc;
regs->gpr[6] = (unsigned long) rt_sf;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
@@ -803,10 +808,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
if (__get_user(cmcp, &ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
+ /* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
if (__get_user(mcp, &ucp->uc_regs))
return -EFAULT;
+ if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+ return -EFAULT;
#endif
restore_sigmask(&set);
if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +916,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
{
struct sig_dbg_op op;
int i;
+ unsigned char tmp;
unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
unsigned long new_dbcr0 = current->thread.dbcr0;
#endif
for (i=0; i<ndbg; i++) {
- if (__copy_from_user(&op, dbg, sizeof(op)))
+ if (copy_from_user(&op, dbg + i, sizeof(op)))
return -EFAULT;
switch (op.dbg_type) {
case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +968,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
current->thread.dbcr0 = new_dbcr0;
#endif
+ if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+ || __get_user(tmp, (u8 __user *) ctx)
+ || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+ return -EFAULT;
+
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
@@ -1029,10 +1043,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
|| __put_user(sig, &sc->signal))
goto badframe;
- if (vdso32_sigtramp && current->thread.vdso_base) {
+ if (vdso32_sigtramp && current->mm->context.vdso_base) {
if (save_user_regs(regs, &frame->mctx, 0))
goto badframe;
- regs->link = current->thread.vdso_base + vdso32_sigtramp;
+ regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
} else {
if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
goto badframe;
@@ -1047,6 +1061,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[3] = sig;
regs->gpr[4] = (unsigned long) sc;
regs->nip = (unsigned long) ka->sa.sa_handler;
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
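[Editor's sketch] The signal_32.c changes above copy exactly one bit (MSR_LE) from the saved MSR into the live MSR on signal return, and clear it when entering a handler. The bit-transfer idiom in isolation, with an illustrative bit value:

#include <stdio.h>

#define DEMO_MSR_LE 0x1UL	/* illustrative bit position */

static unsigned long transfer_le(unsigned long live, unsigned long saved)
{
	return (live & ~DEMO_MSR_LE) | (saved & DEMO_MSR_LE);
}

int main(void)
{
	printf("0x%lx\n", transfer_le(0x9032UL, 0x9033UL)); /* picks up the LE bit */
	return 0;
}
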
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184d..6e75d7ab6d4d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -141,9 +141,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
unsigned long err = 0;
unsigned long save_r13 = 0;
elf_greg_t *gregs = (elf_greg_t *)regs;
-#ifdef CONFIG_ALTIVEC
unsigned long msr;
-#endif
int i;
/* If this is not a signal return, we preserve the TLS in r13 */
@@ -154,7 +152,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __copy_from_user(regs, &sc->gp_regs,
PT_MSR*sizeof(unsigned long));
- /* skip MSR and SOFTE */
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ if (sig)
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+ /* skip SOFTE */
for (i = PT_MSR+1; i <= PT_RESULT; i++) {
if (i == PT_SOFTE)
continue;
@@ -179,9 +182,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
- err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
+ if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+ return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != 0 && (msr & MSR_VEC) != 0)
err |= __copy_from_user(current->thread.vr, v_regs,
@@ -394,8 +398,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && current->thread.vdso_base) {
- regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
@@ -410,6 +414,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Set up "regs" so we "return" to the signal handler. */
err |= get_user(regs->nip, &funct_desc_ptr->entry);
+ /* enter the signal handler in big-endian mode */
+ regs->msr &= ~MSR_LE;
regs->gpr[1] = newsp;
err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
regs->gpr[3] = signr;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 530f7dba0bd2..c5d179d4f818 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -492,7 +492,7 @@ int __devinit __cpu_up(unsigned int cpu)
* -- Cort
*/
if (system_state < SYSTEM_RUNNING)
- for (c = 5000; c && !cpu_callin_map[cpu]; c--)
+ for (c = 50000; c && !cpu_callin_map[cpu]; c--)
udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
else
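[Editor's note] For scale: the new bound is 50000 iterations of udelay(100), i.e. roughly 5 seconds of spinning for the secondary CPU to call in, up from about 0.5 seconds before.
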
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 26ed1f5ef16e..ee75ccf1a802 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -32,6 +32,10 @@
#define SYS32ONLY(func) .long sys_##func
#define SYSX(f, f3264, f32) .long f32
#endif
+#define SYSCALL_SPU(func) SYSCALL(func)
+#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
+#define PPC_SYS_SPU(func) PPC_SYS(func)
+#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
#ifdef CONFIG_PPC64
#define sys_sigpending sys_ni_syscall
@@ -39,309 +43,4 @@
#endif
_GLOBAL(sys_call_table)
-SYSCALL(restart_syscall)
-SYSCALL(exit)
-PPC_SYS(fork)
-SYSCALL(read)
-SYSCALL(write)
-COMPAT_SYS(open)
-SYSCALL(close)
-COMPAT_SYS(waitpid)
-COMPAT_SYS(creat)
-SYSCALL(link)
-SYSCALL(unlink)
-COMPAT_SYS(execve)
-SYSCALL(chdir)
-COMPAT_SYS(time)
-SYSCALL(mknod)
-SYSCALL(chmod)
-SYSCALL(lchown)
-SYSCALL(ni_syscall)
-OLDSYS(stat)
-SYSX(sys_lseek,ppc32_lseek,sys_lseek)
-SYSCALL(getpid)
-COMPAT_SYS(mount)
-SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
-SYSCALL(setuid)
-SYSCALL(getuid)
-COMPAT_SYS(stime)
-COMPAT_SYS(ptrace)
-SYSCALL(alarm)
-OLDSYS(fstat)
-COMPAT_SYS(pause)
-COMPAT_SYS(utime)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(access)
-COMPAT_SYS(nice)
-SYSCALL(ni_syscall)
-SYSCALL(sync)
-COMPAT_SYS(kill)
-SYSCALL(rename)
-COMPAT_SYS(mkdir)
-SYSCALL(rmdir)
-SYSCALL(dup)
-SYSCALL(pipe)
-COMPAT_SYS(times)
-SYSCALL(ni_syscall)
-SYSCALL(brk)
-SYSCALL(setgid)
-SYSCALL(getgid)
-SYSCALL(signal)
-SYSCALL(geteuid)
-SYSCALL(getegid)
-SYSCALL(acct)
-SYSCALL(umount)
-SYSCALL(ni_syscall)
-COMPAT_SYS(ioctl)
-COMPAT_SYS(fcntl)
-SYSCALL(ni_syscall)
-COMPAT_SYS(setpgid)
-SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
-COMPAT_SYS(umask)
-SYSCALL(chroot)
-SYSCALL(ustat)
-SYSCALL(dup2)
-SYSCALL(getppid)
-SYSCALL(getpgrp)
-SYSCALL(setsid)
-SYS32ONLY(sigaction)
-SYSCALL(sgetmask)
-COMPAT_SYS(ssetmask)
-SYSCALL(setreuid)
-SYSCALL(setregid)
-SYS32ONLY(sigsuspend)
-COMPAT_SYS(sigpending)
-COMPAT_SYS(sethostname)
-COMPAT_SYS(setrlimit)
-COMPAT_SYS(old_getrlimit)
-COMPAT_SYS(getrusage)
-COMPAT_SYS(gettimeofday)
-COMPAT_SYS(settimeofday)
-COMPAT_SYS(getgroups)
-COMPAT_SYS(setgroups)
-SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
-SYSCALL(symlink)
-OLDSYS(lstat)
-COMPAT_SYS(readlink)
-SYSCALL(uselib)
-SYSCALL(swapon)
-SYSCALL(reboot)
-SYSX(sys_ni_syscall,old32_readdir,old_readdir)
-SYSCALL(mmap)
-SYSCALL(munmap)
-SYSCALL(truncate)
-SYSCALL(ftruncate)
-SYSCALL(fchmod)
-SYSCALL(fchown)
-COMPAT_SYS(getpriority)
-COMPAT_SYS(setpriority)
-SYSCALL(ni_syscall)
-COMPAT_SYS(statfs)
-COMPAT_SYS(fstatfs)
-SYSCALL(ni_syscall)
-COMPAT_SYS(socketcall)
-COMPAT_SYS(syslog)
-COMPAT_SYS(setitimer)
-COMPAT_SYS(getitimer)
-COMPAT_SYS(newstat)
-COMPAT_SYS(newlstat)
-COMPAT_SYS(newfstat)
-SYSX(sys_ni_syscall,sys_uname,sys_uname)
-SYSCALL(ni_syscall)
-SYSCALL(vhangup)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(wait4)
-SYSCALL(swapoff)
-COMPAT_SYS(sysinfo)
-COMPAT_SYS(ipc)
-SYSCALL(fsync)
-SYS32ONLY(sigreturn)
-PPC_SYS(clone)
-COMPAT_SYS(setdomainname)
-PPC_SYS(newuname)
-SYSCALL(ni_syscall)
-COMPAT_SYS(adjtimex)
-SYSCALL(mprotect)
-SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
-SYSCALL(ni_syscall)
-SYSCALL(init_module)
-SYSCALL(delete_module)
-SYSCALL(ni_syscall)
-SYSCALL(quotactl)
-COMPAT_SYS(getpgid)
-SYSCALL(fchdir)
-SYSCALL(bdflush)
-COMPAT_SYS(sysfs)
-SYSX(ppc64_personality,ppc64_personality,sys_personality)
-SYSCALL(ni_syscall)
-SYSCALL(setfsuid)
-SYSCALL(setfsgid)
-SYSCALL(llseek)
-COMPAT_SYS(getdents)
-SYSX(sys_select,ppc32_select,ppc_select)
-SYSCALL(flock)
-SYSCALL(msync)
-COMPAT_SYS(readv)
-COMPAT_SYS(writev)
-COMPAT_SYS(getsid)
-SYSCALL(fdatasync)
-COMPAT_SYS(sysctl)
-SYSCALL(mlock)
-SYSCALL(munlock)
-SYSCALL(mlockall)
-SYSCALL(munlockall)
-COMPAT_SYS(sched_setparam)
-COMPAT_SYS(sched_getparam)
-COMPAT_SYS(sched_setscheduler)
-COMPAT_SYS(sched_getscheduler)
-SYSCALL(sched_yield)
-COMPAT_SYS(sched_get_priority_max)
-COMPAT_SYS(sched_get_priority_min)
-COMPAT_SYS(sched_rr_get_interval)
-COMPAT_SYS(nanosleep)
-SYSCALL(mremap)
-SYSCALL(setresuid)
-SYSCALL(getresuid)
-SYSCALL(ni_syscall)
-SYSCALL(poll)
-COMPAT_SYS(nfsservctl)
-SYSCALL(setresgid)
-SYSCALL(getresgid)
-COMPAT_SYS(prctl)
-COMPAT_SYS(rt_sigreturn)
-COMPAT_SYS(rt_sigaction)
-COMPAT_SYS(rt_sigprocmask)
-COMPAT_SYS(rt_sigpending)
-COMPAT_SYS(rt_sigtimedwait)
-COMPAT_SYS(rt_sigqueueinfo)
-COMPAT_SYS(rt_sigsuspend)
-COMPAT_SYS(pread64)
-COMPAT_SYS(pwrite64)
-SYSCALL(chown)
-SYSCALL(getcwd)
-SYSCALL(capget)
-SYSCALL(capset)
-COMPAT_SYS(sigaltstack)
-SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-PPC_SYS(vfork)
-COMPAT_SYS(getrlimit)
-COMPAT_SYS(readahead)
-SYS32ONLY(mmap2)
-SYS32ONLY(truncate64)
-SYS32ONLY(ftruncate64)
-SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
-SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
-SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
-SYSCALL(pciconfig_read)
-SYSCALL(pciconfig_write)
-SYSCALL(pciconfig_iobase)
-SYSCALL(ni_syscall)
-SYSCALL(getdents64)
-SYSCALL(pivot_root)
-SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
-SYSCALL(madvise)
-SYSCALL(mincore)
-SYSCALL(gettid)
-SYSCALL(tkill)
-SYSCALL(setxattr)
-SYSCALL(lsetxattr)
-SYSCALL(fsetxattr)
-SYSCALL(getxattr)
-SYSCALL(lgetxattr)
-SYSCALL(fgetxattr)
-SYSCALL(listxattr)
-SYSCALL(llistxattr)
-SYSCALL(flistxattr)
-SYSCALL(removexattr)
-SYSCALL(lremovexattr)
-SYSCALL(fremovexattr)
-COMPAT_SYS(futex)
-COMPAT_SYS(sched_setaffinity)
-COMPAT_SYS(sched_getaffinity)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYS32ONLY(sendfile64)
-COMPAT_SYS(io_setup)
-SYSCALL(io_destroy)
-COMPAT_SYS(io_getevents)
-COMPAT_SYS(io_submit)
-SYSCALL(io_cancel)
-SYSCALL(set_tid_address)
-SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
-SYSCALL(exit_group)
-SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
-SYSCALL(epoll_create)
-SYSCALL(epoll_ctl)
-SYSCALL(epoll_wait)
-SYSCALL(remap_file_pages)
-SYSX(sys_timer_create,compat_sys_timer_create,sys_timer_create)
-COMPAT_SYS(timer_settime)
-COMPAT_SYS(timer_gettime)
-SYSCALL(timer_getoverrun)
-SYSCALL(timer_delete)
-COMPAT_SYS(clock_settime)
-COMPAT_SYS(clock_gettime)
-COMPAT_SYS(clock_getres)
-COMPAT_SYS(clock_nanosleep)
-SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
-COMPAT_SYS(tgkill)
-COMPAT_SYS(utimes)
-COMPAT_SYS(statfs64)
-COMPAT_SYS(fstatfs64)
-SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
-PPC_SYS(rtas)
-OLDSYS(debug_setcontext)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS(mbind)
-COMPAT_SYS(get_mempolicy)
-COMPAT_SYS(set_mempolicy)
-COMPAT_SYS(mq_open)
-SYSCALL(mq_unlink)
-COMPAT_SYS(mq_timedsend)
-COMPAT_SYS(mq_timedreceive)
-COMPAT_SYS(mq_notify)
-COMPAT_SYS(mq_getsetattr)
-COMPAT_SYS(kexec_load)
-COMPAT_SYS(add_key)
-COMPAT_SYS(request_key)
-COMPAT_SYS(keyctl)
-COMPAT_SYS(waitid)
-COMPAT_SYS(ioprio_set)
-COMPAT_SYS(ioprio_get)
-SYSCALL(inotify_init)
-SYSCALL(inotify_add_watch)
-SYSCALL(inotify_rm_watch)
-SYSCALL(spu_run)
-SYSCALL(spu_create)
-COMPAT_SYS(pselect6)
-COMPAT_SYS(ppoll)
-SYSCALL(unshare)
-SYSCALL(splice)
-SYSCALL(tee)
-SYSCALL(vmsplice)
-COMPAT_SYS(openat)
-SYSCALL(mkdirat)
-SYSCALL(mknodat)
-SYSCALL(fchownat)
-COMPAT_SYS(futimesat)
-SYSX(sys_newfstatat, sys_fstatat64, sys_fstatat64)
-SYSCALL(unlinkat)
-SYSCALL(renameat)
-SYSCALL(linkat)
-SYSCALL(symlinkat)
-SYSCALL(readlinkat)
-SYSCALL(fchmodat)
-SYSCALL(faccessat)
-COMPAT_SYS(get_robust_list)
-COMPAT_SYS(set_robust_list)
-
-/*
- * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
- * as well when appropriate.
- */
+#include <asm/systbl.h>
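[Editor's sketch] The systbl.S change above replaces the hand-maintained table with a single shared list (asm/systbl.h) expanded under different macro definitions (SYSCALL, COMPAT_SYS, the new *_SPU variants, and so on). That is the classic "X macro" technique; a minimal standalone illustration in C:

#include <stdio.h>

#define DEMO_CALL_LIST \
	DEMO_CALL(read)  \
	DEMO_CALL(write) \
	DEMO_CALL(open)

/* Expansion 1: an enum of call numbers */
#define DEMO_CALL(name) DEMO_NR_##name,
enum { DEMO_CALL_LIST DEMO_NR_MAX };
#undef DEMO_CALL

/* Expansion 2: a table of call names */
#define DEMO_CALL(name) #name,
static const char *demo_call_names[] = { DEMO_CALL_LIST };
#undef DEMO_CALL

int main(void)
{
	for (int i = 0; i < DEMO_NR_MAX; i++)
		printf("%d: %s\n", i, demo_call_names[i]);
	return 0;
}

One list now feeds both the kernel's table and other consumers (such as the cell SPU callback list), so entries can no longer drift out of sync.
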
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 24e3ad756de0..d20907561f46 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -76,7 +76,6 @@
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
-extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
@@ -858,42 +857,50 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
-void __init generic_calibrate_decr(void)
+static int __init get_freq(char *name, int cells, unsigned long *val)
{
struct device_node *cpu;
unsigned int *fp;
- int node_found;
+ int found = 0;
- /*
- * The cpu node should have a timebase-frequency property
- * to tell us the rate at which the decrementer counts.
- */
+ /* The cpu node should have timebase and clock frequency properties */
cpu = of_find_node_by_type(NULL, "cpu");
- ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
- node_found = 0;
if (cpu) {
- fp = (unsigned int *)get_property(cpu, "timebase-frequency",
- NULL);
+ fp = (unsigned int *)get_property(cpu, name, NULL);
if (fp) {
- node_found = 1;
- ppc_tb_freq = *fp;
+ found = 1;
+ *val = 0;
+ while (cells--)
+ *val = (*val << 32) | *fp++;
}
+
+ of_node_put(cpu);
}
- if (!node_found)
+
+ return found;
+}
+
+void __init generic_calibrate_decr(void)
+{
+ ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
+
+ if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
+ !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
+
printk(KERN_ERR "WARNING: Estimating decrementer frequency "
"(not found)\n");
+ }
- ppc_proc_freq = DEFAULT_PROC_FREQ;
- node_found = 0;
- if (cpu) {
- fp = (unsigned int *)get_property(cpu, "clock-frequency",
- NULL);
- if (fp) {
- node_found = 1;
- ppc_proc_freq = *fp;
- }
+ ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
+
+ if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
+ !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
+
+ printk(KERN_ERR "WARNING: Estimating processor frequency "
+ "(not found)\n");
}
+
#ifdef CONFIG_BOOKE
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
@@ -905,11 +912,6 @@ void __init generic_calibrate_decr(void)
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
#endif
- if (!node_found)
- printk(KERN_ERR "WARNING: Estimating processor frequency "
- "(not found)\n");
-
- of_node_put(cpu);
}
unsigned long get_boot_time(void)
@@ -945,9 +947,9 @@ void __init time_init(void)
} else {
/* Normal PowerPC with timebase register */
ppc_md.calibrate_decr();
- printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
- printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
+ printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_last_stamp = tb_last_jiffy = get_tb();
}
@@ -1010,10 +1012,7 @@ void __init time_init(void)
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
-#ifdef CONFIG_PPC_ISERIES
- if (!piranha_simulator)
-#endif
- tm = get_boot_time();
+ tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
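[Editor's sketch] generic_calibrate_decr() above now prefers the 64-bit "ibm,extended-*" frequency properties and falls back to the legacy 32-bit ones. A userspace analogue that reads the same properties from /proc/device-tree; the cpu node path is an example and varies by machine:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int read_freq(const char *path, unsigned long long *val)
{
	unsigned char buf[8];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f)
		return 0;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n != 4 && n != 8)
		return 0;

	*val = 0;
	for (size_t i = 0; i < n; i++)	/* big-endian bytes to host value */
		*val = (*val << 8) | buf[i];
	return 1;
}

int main(void)
{
	unsigned long long tb;
	const char *base = "/proc/device-tree/cpus/PowerPC,970@0"; /* example node */
	char path[256];

	snprintf(path, sizeof(path), "%s/ibm,extended-timebase-frequency", base);
	if (!read_freq(path, &tb)) {
		snprintf(path, sizeof(path), "%s/timebase-frequency", base);
		if (!read_freq(path, &tb))
			return 1;
	}
	printf("timebase: %llu Hz\n", tb);
	return 0;
}
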
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 064a52564692..52f5659534f4 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
+#include <linux/backlight.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
@@ -105,10 +106,18 @@ int die(const char *str, struct pt_regs *regs, long err)
spin_lock_irq(&die_lock);
bust_spinlocks(1);
#ifdef CONFIG_PMAC_BACKLIGHT
- if (machine_is(powermac)) {
- set_backlight_enable(1);
- set_backlight_level(BACKLIGHT_MAX);
+ mutex_lock(&pmac_backlight_mutex);
+ if (machine_is(powermac) && pmac_backlight) {
+ struct backlight_properties *props;
+
+ down(&pmac_backlight->sem);
+ props = pmac_backlight->props;
+ props->brightness = props->max_brightness;
+ props->power = FB_BLANK_UNBLANK;
+ props->update_status(pmac_backlight);
+ up(&pmac_backlight->sem);
}
+ mutex_unlock(&pmac_backlight_mutex);
#endif
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
@@ -658,7 +667,7 @@ static int emulate_instruction(struct pt_regs *regs)
u32 instword;
u32 rd;
- if (!user_mode(regs))
+ if (!user_mode(regs) || (regs->msr & MSR_LE))
return -EINVAL;
CHECK_FULL_REGS(regs);
@@ -805,9 +814,11 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
- int fixed;
+ int fixed = 0;
- fixed = fix_alignment(regs);
+ /* we don't implement logging of alignment exceptions */
+ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ fixed = fix_alignment(regs);
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 3774e80094f5..67d9fd9ae2b5 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/udbg.h>
@@ -141,12 +142,14 @@ static int early_console_initialized;
void __init disable_early_printk(void)
{
-#if 1
if (!early_console_initialized)
return;
+ if (strstr(saved_command_line, "udbg-immortal")) {
+ printk(KERN_INFO "early console immortal !\n");
+ return;
+ }
unregister_console(&udbg_console);
early_console_initialized = 0;
-#endif
}
/* called by setup_system */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 573afb68d69e..bc3e15be3087 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
+ int rc;
#ifdef CONFIG_PPC64
if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_base = VDSO32_MBASE;
#endif
- current->thread.vdso_base = 0;
+ current->mm->context.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (vma == NULL)
- return -ENOMEM;
-
- memset(vma, 0, sizeof(*vma));
-
/* Add a page to the vdso size for the data page */
vdso_pages ++;
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
+ down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
- if (vdso_base & ~PAGE_MASK) {
- kmem_cache_free(vm_area_cachep, vma);
- return (int)vdso_base;
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
}
- current->thread.vdso_base = vdso_base;
+ /* Allocate a VMA structure and fill it up */
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (vma == NULL) {
+ rc = -ENOMEM;
+ goto fail_mmapsem;
+ }
vma->vm_mm = mm;
- vma->vm_start = current->thread.vdso_base;
+ vma->vm_start = vdso_base;
vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
* It's fine to use that for setting breakpoints in the vDSO code
* pages though
*/
- vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
- down_write(&mm->mmap_sem);
- if (insert_vm_struct(mm, vma)) {
- up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
- return -ENOMEM;
- }
+ /* Insert new VMA */
+ rc = insert_vm_struct(mm, vma);
+ if (rc)
+ goto fail_vma;
+
+ /* Put vDSO base into mm struct and account for memory usage */
+ current->mm->context.vdso_base = vdso_base;
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
-
return 0;
+
+ fail_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+ up_write(&mm->mmap_sem);
+ return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+ return "[vdso]";
+ return NULL;
}
+
+
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 66b3d03c5fa5..9416b4ab92ec 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -53,12 +53,12 @@ fpenable:
stfd fr31,8(r1)
LDCONST(fr1, fpzero)
mffs fr31
- mtfsf 0xff,fr1
+ MTFSF_L(fr1)
blr
fpdisable:
mtlr r12
- mtfsf 0xff,fr31
+ MTFSF_L(fr31)
lfd fr31,8(r1)
lfd fr1,16(r1)
lfd fr0,24(r1)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 971020cf3f7d..cdf5867838a6 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -13,27 +13,116 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/types.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kobject.h>
+
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
-
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *, const struct vio_dev *);
-
-struct vio_dev vio_bus_device = { /* fake "parent" device */
+#include <asm/firmware.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/iommu.h>
+
+extern struct subsystem devices_subsys; /* needed for vio_find_name() */
+
+static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = vio_bus_device.dev.bus_id,
.type = "",
.dev.bus_id = "vio",
.dev.bus = &vio_bus_type,
};
-static struct vio_bus_ops vio_bus_ops;
+#ifdef CONFIG_PPC_ISERIES
+struct device *iSeries_vio_dev = &vio_bus_device.dev;
+EXPORT_SYMBOL(iSeries_vio_dev);
+
+static struct iommu_table veth_iommu_table;
+static struct iommu_table vio_iommu_table;
+
+static void __init iommu_vio_init(void)
+{
+ iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+ veth_iommu_table.it_size /= 2;
+ vio_iommu_table = veth_iommu_table;
+ vio_iommu_table.it_offset += veth_iommu_table.it_size;
+
+ if (!iommu_init_table(&veth_iommu_table, -1))
+ printk(KERN_ERR "Virtual Bus VETH TCE table failed.\n");
+ if (!iommu_init_table(&vio_iommu_table, -1))
+ printk(KERN_ERR "Virtual Bus VIO TCE table failed.\n");
+}
+#endif
+
+static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
+{
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ if (strcmp(dev->type, "network") == 0)
+ return &veth_iommu_table;
+ return &vio_iommu_table;
+ } else
+#endif
+ {
+ unsigned char *dma_window;
+ struct iommu_table *tbl;
+ unsigned long offset, size;
+
+ dma_window = get_property(dev->dev.platform_data,
+ "ibm,my-dma-window", NULL);
+ if (!dma_window)
+ return NULL;
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (tbl == NULL)
+ return NULL;
+
+ of_parse_dma_window(dev->dev.platform_data, dma_window,
+ &tbl->it_index, &offset, &size);
+
+ /* TCE table size - measured in tce entries */
+ tbl->it_size = size >> PAGE_SHIFT;
+ /* offset for VIO should always be 0 */
+ tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_VB;
+
+ return iommu_init_table(tbl, -1);
+ }
+}
+
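vio_build_iommu_table() above converts the byte-granular (offset, size) pair parsed from the "ibm,my-dma-window" property into the page-granular it_offset and it_size fields of the TCE table. The sketch below shows just that conversion, with a simplified stand-in structure and an assumed 4 KB page size:

/* Hedged sketch: convert a DMA window given in bytes into page-granular
 * table parameters, as vio_build_iommu_table does above.  The structure
 * and the 4 KB PAGE_SHIFT are simplified assumptions for illustration. */
#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4 KB pages */

struct toy_iommu_table {
	unsigned long it_offset; /* window start, in pages */
	unsigned long it_size;   /* window length, in TCE entries (pages) */
};

static void fill_table(struct toy_iommu_table *tbl,
		       unsigned long offset, unsigned long size)
{
	tbl->it_size = size >> PAGE_SHIFT;     /* one entry per page */
	tbl->it_offset = offset >> PAGE_SHIFT; /* usually 0 for VIO */
}

int main(void)
{
	struct toy_iommu_table tbl;

	fill_table(&tbl, 0, 256UL << 20); /* a 256 MB window at offset 0 */
	printf("it_offset=%lu it_size=%lu entries\n",
	       tbl.it_offset, tbl.it_size);
	return 0;
}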
+/**
+ * vio_match_device: - Tell if a VIO device has a matching
+ * VIO device id structure.
+ * @ids: array of VIO device id structures to search in
+ * @dev: the VIO device structure to match against
+ *
+ * Used by a driver to check whether a VIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * vio_device_id structure or NULL if there is no match.
+ */
+static const struct vio_device_id *vio_match_device(
+ const struct vio_device_id *ids, const struct vio_dev *dev)
+{
+ while (ids->type[0] != '\0') {
+ if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
+ device_is_compatible(dev->dev.platform_data, ids->compat))
+ return ids;
+ ids++;
+ }
+ return NULL;
+}
/*
* Convert from struct device to struct vio_dev and pass to driver.
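vio_match_device() above walks an ID table terminated by an entry with an empty type string, matching on the device type prefix and on device-tree compatibility. Here is a self-contained sketch of that sentinel-walk pattern, with simplified stand-in structures and a plain string comparison in place of device_is_compatible():

/* Hedged sketch of the ID-table walk above: iterate a table terminated by
 * an empty-type entry and return the first match.  Simplified types only;
 * these are not the kernel structures. */
#include <stdio.h>
#include <string.h>

struct toy_id  { const char *type; const char *compat; };
struct toy_dev { const char *type; const char *compat; };

static const struct toy_id *match_device(const struct toy_id *ids,
					 const struct toy_dev *dev)
{
	for (; ids->type && ids->type[0] != '\0'; ids++) {
		if (strncmp(dev->type, ids->type, strlen(ids->type)) == 0 &&
		    strcmp(dev->compat, ids->compat) == 0)
			return ids;
	}
	return NULL; /* sentinel reached: no match */
}

int main(void)
{
	static const struct toy_id table[] = {
		{ "network", "IBM,l-lan" },
		{ "vscsi",   "IBM,v-scsi" },
		{ "", "" }, /* sentinel */
	};
	struct toy_dev dev = { "network", "IBM,l-lan" };
	const struct toy_id *id = match_device(table, &dev);

	printf("match: %s\n", id ? id->type : "none");
	return 0;
}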
@@ -106,35 +195,110 @@ void vio_unregister_driver(struct vio_driver *viodrv)
}
EXPORT_SYMBOL(vio_unregister_driver);
+/* vio_dev refcount hit 0 */
+static void __devinit vio_dev_release(struct device *dev)
+{
+ if (dev->platform_data) {
+ /* XXX free TCE table */
+ of_node_put(dev->platform_data);
+ }
+ kfree(to_vio_dev(dev));
+}
+
/**
- * vio_match_device: - Tell if a VIO device has a matching
- * VIO device id structure.
- * @ids: array of VIO device id structures to search in
- * @dev: the VIO device structure to match against
+ * vio_register_device_node: - Register a new vio device.
+ * @of_node: The OF node for this device.
*
- * Used by a driver to check whether a VIO device present in the
- * system is in its list of supported devices. Returns the matching
- * vio_device_id structure or NULL if there is no match.
+ * Creates and initializes a vio_dev structure from the data in
+ * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * Returns a pointer to the created vio_dev or NULL if the node lacks
+ * a 'device_type' or 'reg' property, or if registration fails.
*/
-static const struct vio_device_id *vio_match_device(
- const struct vio_device_id *ids, const struct vio_dev *dev)
+struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
{
- while (ids->type[0] != '\0') {
- if (vio_bus_ops.match(ids, dev))
- return ids;
- ids++;
+ struct vio_dev *viodev;
+ unsigned int *unit_address;
+ unsigned int *irq_p;
+
+ /* we need the 'device_type' property in order to match with drivers */
+ if (of_node->type == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
}
- return NULL;
+
+ unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
+ if (unit_address == NULL) {
+ printk(KERN_WARNING "%s: node %s missing 'reg'\n",
+ __FUNCTION__,
+ of_node->name ? of_node->name : "<unknown>");
+ return NULL;
+ }
+
+ /* allocate a vio_dev for this node */
+ viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
+ if (viodev == NULL)
+ return NULL;
+
+ viodev->dev.platform_data = of_node_get(of_node);
+
+ viodev->irq = NO_IRQ;
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
+ if (irq_p) {
+ int virq = virt_irq_create_mapping(*irq_p);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", of_node->full_name);
+ } else
+ viodev->irq = irq_offset_up(virq);
+ }
+
+ snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
+ viodev->name = of_node->name;
+ viodev->type = of_node->type;
+ viodev->unit_address = *unit_address;
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ unit_address = (unsigned int *)get_property(of_node,
+ "linux,unit_address", NULL);
+ if (unit_address != NULL)
+ viodev->unit_address = *unit_address;
+ }
+ viodev->iommu_table = vio_build_iommu_table(viodev);
+
+ /* init generic 'struct device' fields: */
+ viodev->dev.parent = &vio_bus_device.dev;
+ viodev->dev.bus = &vio_bus_type;
+ viodev->dev.release = vio_dev_release;
+
+ /* register with generic device framework */
+ if (device_register(&viodev->dev)) {
+ printk(KERN_ERR "%s: failed to register device %s\n",
+ __FUNCTION__, viodev->dev.bus_id);
+ /* XXX free TCE table */
+ kfree(viodev);
+ return NULL;
+ }
+
+ return viodev;
}
+EXPORT_SYMBOL(vio_register_device_node);
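vio_register_device_node() above follows a validate / allocate / fill / register pattern and frees the partially built device if device_register() fails. The stand-alone sketch below mirrors that shape; register_with_core(), toy_node and toy_dev are hypothetical stand-ins, not kernel interfaces:

/* Hedged sketch of the create-and-register flow above: validate required
 * properties, allocate the device, attempt registration, and free the
 * allocation again if registration fails. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_node { const char *type; const unsigned int *reg; };
struct toy_dev  { char bus_id[32]; const char *type; unsigned int unit; };

static int register_with_core(struct toy_dev *dev)
{
	printf("registered %s (%s)\n", dev->bus_id, dev->type);
	return 0; /* pretend the core accepted it */
}

static struct toy_dev *register_node(const struct toy_node *node)
{
	struct toy_dev *dev;

	if (!node->type || !node->reg)
		return NULL; /* missing 'device_type' or 'reg' */

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return NULL;

	snprintf(dev->bus_id, sizeof(dev->bus_id), "%x", *node->reg);
	dev->type = node->type;
	dev->unit = *node->reg;

	if (register_with_core(dev)) {
		free(dev); /* undo the allocation on failure */
		return NULL;
	}
	return dev;
}

int main(void)
{
	static const unsigned int reg = 0x30000003;
	struct toy_node node = { "vty", &reg };
	struct toy_dev *dev = register_node(&node);
	int ok = (dev != NULL);

	free(dev);
	return ok ? 0 : 1;
}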
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
-int __init vio_bus_init(struct vio_bus_ops *ops)
+static int __init vio_bus_init(void)
{
int err;
+ struct device_node *node_vroot;
- vio_bus_ops = *ops;
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ iommu_vio_init();
+ vio_bus_device.iommu_table = &vio_iommu_table;
+ iSeries_vio_dev = &vio_bus_device.dev;
+ }
+#endif
err = bus_register(&vio_bus_type);
if (err) {
@@ -153,47 +317,48 @@ int __init vio_bus_init(struct vio_bus_ops *ops)
return err;
}
- return 0;
-}
+ node_vroot = find_devices("vdevice");
+ if (node_vroot) {
+ struct device_node *of_node;
+
+ /*
+ * Create struct vio_devices for each virtual device in
+ * the device tree. Drivers will associate with them later.
+ */
+ for (of_node = node_vroot->child; of_node != NULL;
+ of_node = of_node->sibling) {
+ printk(KERN_DEBUG "%s: processing %p\n",
+ __FUNCTION__, of_node);
+ vio_register_device_node(of_node);
+ }
+ }
-/* vio_dev refcount hit 0 */
-static void __devinit vio_dev_release(struct device *dev)
-{
- if (vio_bus_ops.release_device)
- vio_bus_ops.release_device(dev);
- kfree(to_vio_dev(dev));
+ return 0;
}
+__initcall(vio_bus_init);
-static ssize_t viodev_show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
-DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
-struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- /* init generic 'struct device' fields: */
- viodev->dev.parent = &vio_bus_device.dev;
- viodev->dev.bus = &vio_bus_type;
- viodev->dev.release = vio_dev_release;
-
- /* register with generic device framework */
- if (device_register(&viodev->dev)) {
- printk(KERN_ERR "%s: failed to register device %s\n",
- __FUNCTION__, viodev->dev.bus_id);
- return NULL;
- }
- device_create_file(&viodev->dev, &dev_attr_name);
+ struct device_node *of_node = dev->platform_data;
- return viodev;
+ return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
+static struct device_attribute vio_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(devspec),
+ __ATTR_NULL
+};
+
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
- if (vio_bus_ops.unregister_device)
- vio_bus_ops.unregister_device(viodev);
- device_remove_file(&viodev->dev, &dev_attr_name);
device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
@@ -229,7 +394,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
- dma_handle, ~0ul, flag);
+ dma_handle, ~0ul, flag, -1);
}
static void vio_free_coherent(struct device *dev, size_t size,
@@ -267,22 +432,23 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
+ struct device_node *dn = dev->platform_data;
char *cp;
int length;
if (!num_envp)
return -ENOMEM;
- if (!vio_dev->dev.platform_data)
+ if (!dn)
return -ENODEV;
- cp = (char *)get_property(vio_dev->dev.platform_data, "compatible", &length);
+ cp = (char *)get_property(dn, "compatible", &length);
if (!cp)
return -ENODEV;
envp[0] = buffer;
length = scnprintf(buffer, buffer_size, "MODALIAS=vio:T%sS%s",
vio_dev->type, cp);
- if (buffer_size - length <= 0)
+ if ((buffer_size - length) <= 0)
return -ENOMEM;
envp[1] = NULL;
return 0;
@@ -290,9 +456,81 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
struct bus_type vio_bus_type = {
.name = "vio",
+ .dev_attrs = vio_dev_attrs,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
.shutdown = vio_bus_shutdown,
};
+
+/**
+ * vio_get_attribute: - get attribute for virtual device
+ * @vdev: The vio device whose property is fetched.
+ * @which: The name of the property/attribute to be extracted.
+ * @length: Pointer to the length of the returned data (unused if NULL).
+ *
+ * Calls prom.c's get_property() to return the value of the
+ * attribute specified by @which.
+ */
+const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
+{
+ return get_property(vdev->dev.platform_data, which, length);
+}
+EXPORT_SYMBOL(vio_get_attribute);
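vio_get_attribute() above is a thin wrapper around get_property() on the device's firmware node. A typical call site would be a pSeries VIO driver's probe() routine reading a property such as "ibm,my-dma-window" (the same property consulted earlier in this file); the snippet below is a hedged sketch of such a call site, where the probe() signature and the error handling are assumptions about a typical driver of this era, not something this patch adds:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/vio.h>

/* Hedged usage sketch, not from the patch: read a device-tree property
 * from a VIO driver's probe() routine via the helper exported above. */
static int example_vio_probe(struct vio_dev *vdev,
			     const struct vio_device_id *id)
{
	const unsigned int *dma_window;
	int len;

	dma_window = vio_get_attribute(vdev, "ibm,my-dma-window", &len);
	if (!dma_window) {
		printk(KERN_ERR "%s: no ibm,my-dma-window property\n",
		       vdev->dev.bus_id);
		return -ENODEV;
	}

	printk(KERN_INFO "%s: dma window property is %d bytes\n",
	       vdev->dev.bus_id, len);
	return 0;
}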
+
+#ifdef CONFIG_PPC_PSERIES
+/* vio_find_name() - internal because only vio.c knows how we formatted the
+ * kobject name
+ * XXX once vio_bus_type.devices is actually used as a kset in
+ * drivers/base/bus.c, this function should be removed in favor of
+ * "device_find(kobj_name, &vio_bus_type)"
+ */
+static struct vio_dev *vio_find_name(const char *kobj_name)
+{
+ struct kobject *found;
+
+ found = kset_find_obj(&devices_subsys.kset, kobj_name);
+ if (!found)
+ return NULL;
+
+ return to_vio_dev(container_of(found, struct device, kobj));
+}
+
+/**
+ * vio_find_node - find an already-registered vio_dev
+ * @vnode: device_node of the virtual device we're looking for
+ */
+struct vio_dev *vio_find_node(struct device_node *vnode)
+{
+ uint32_t *unit_address;
+ char kobj_name[BUS_ID_SIZE];
+
+ /* construct the kobject name from the device node */
+ unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
+ if (!unit_address)
+ return NULL;
+ snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
+
+ return vio_find_name(kobj_name);
+}
+EXPORT_SYMBOL(vio_find_node);
+
+int vio_enable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_enable_interrupts);
+
+int vio_disable_interrupts(struct vio_dev *dev)
+{
+ int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(vio_disable_interrupts);
+#endif /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index fe79c2584cb0..8b25953dc4f0 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -93,6 +93,11 @@ SECTIONS
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
+#ifdef CONFIG_PPC_ISERIES
+ __dt_strings_start = .;
+ *(.dt_strings);
+ __dt_strings_end = .;
+#endif
}
. = ALIGN(16);