Diffstat (limited to 'arch/powerpc/kernel')
47 files changed, 556 insertions, 483 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a27ccd5dc6b9..de91f3ae631e 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -54,8 +54,6 @@ struct aligninfo { /* DSISR bits reported for a DCBZ instruction: */ #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ -#define SWAP(a, b) (t = (a), (a) = (b), (b) = t) - /* * The PowerPC stores certain bits of the instruction that caused the * alignment exception in the DSISR register. This array maps those @@ -256,11 +254,17 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) * bottom 4 bytes of each register, and the loads clear the * top 4 bytes of the affected register. */ +#ifdef __BIG_ENDIAN__ #ifdef CONFIG_PPC64 #define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) #else #define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) #endif +#endif + +#ifdef __LITTLE_ENDIAN__ +#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3)))) +#endif #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) @@ -305,6 +309,15 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, nb0 = nb + reg * 4 - 128; nb = 128 - reg * 4; } +#ifdef __LITTLE_ENDIAN__ + /* + * String instructions are endian neutral but the code + * below is not. Force byte swapping on so that the + * effects of swizzling are undone in the load/store + * loops below. + */ + flags ^= SW; +#endif } else { /* lwm, stmw */ nb = (32 - reg) * 4; @@ -458,7 +471,7 @@ static struct aligninfo spe_aligninfo[32] = { static int emulate_spe(struct pt_regs *regs, unsigned int reg, unsigned int instr) { - int t, ret; + int ret; union { u64 ll; u32 w[2]; @@ -581,24 +594,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, if (flags & SW) { switch (flags & 0xf0) { case E8: - SWAP(data.v[0], data.v[7]); - SWAP(data.v[1], data.v[6]); - SWAP(data.v[2], data.v[5]); - SWAP(data.v[3], data.v[4]); + data.ll = swab64(data.ll); break; case E4: - - SWAP(data.v[0], data.v[3]); - SWAP(data.v[1], data.v[2]); - SWAP(data.v[4], data.v[7]); - SWAP(data.v[5], data.v[6]); + data.w[0] = swab32(data.w[0]); + data.w[1] = swab32(data.w[1]); break; /* Its half word endian */ default: - SWAP(data.v[0], data.v[1]); - SWAP(data.v[2], data.v[3]); - SWAP(data.v[4], data.v[5]); - SWAP(data.v[6], data.v[7]); + data.h[0] = swab16(data.h[0]); + data.h[1] = swab16(data.h[1]); + data.h[2] = swab16(data.h[2]); + data.h[3] = swab16(data.h[3]); break; } } @@ -658,14 +665,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, flush_vsx_to_thread(current); if (reg < 32) - ptr = (char *) ¤t->thread.TS_FPR(reg); + ptr = (char *) ¤t->thread.fp_state.fpr[reg][0]; else - ptr = (char *) ¤t->thread.vr[reg - 32]; + ptr = (char *) ¤t->thread.vr_state.vr[reg - 32]; lptr = (unsigned long *) ptr; +#ifdef __LITTLE_ENDIAN__ + if (flags & SW) { + elsize = length; + sw = length-1; + } else { + /* + * The elements are BE ordered, even in LE mode, so process + * them in reverse order. 
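Editor's note: the REG_BYTE() definitions earlier in this hunk encode where byte i of an operand image lives inside the register array. The words used by lmw/stmw sit in the low 32 bits of each 64-bit register slot; on a big-endian 64-bit kernel those are byte offsets 4..7 (hence the "+ 4"), while on little-endian they are offsets 0..3 and the bytes can be indexed directly. A minimal stand-alone sketch of that layout difference (user-space C, not kernel code; the test value is made up):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint64_t slot;
        uint32_t word = 0xAABBCCDDu;
        uint8_t bytes[8];

        slot = word;            /* 32-bit payload in the low half of a 64-bit slot */
        memcpy(bytes, &slot, sizeof(bytes));

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        /* Big endian: the low 32 bits occupy byte offsets 4..7, which is
         * why the 64-bit big-endian REG_BYTE() adds 4. */
        assert(bytes[4] == 0xAA && bytes[7] == 0xDD);
    #else
        /* Little endian: the low 32 bits occupy byte offsets 0..3, so no
         * offset correction is needed and REG_BYTE() indexes directly. */
        assert(bytes[0] == 0xDD && bytes[3] == 0xAA);
    #endif
        return 0;
    }

The diff continues below.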
+ */ + addr += length - elsize; + + /* 8 byte memory accesses go in the top 8 bytes of the VR */ + if (length == 8) + ptr += 8; + } +#else if (flags & SW) sw = elsize-1; +#endif for (j = 0; j < length; j += elsize) { for (i = 0; i < elsize; ++i) { @@ -675,19 +699,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, ret |= __get_user(ptr[i^sw], addr + i); } ptr += elsize; +#ifdef __LITTLE_ENDIAN__ + addr -= elsize; +#else addr += elsize; +#endif } +#ifdef __BIG_ENDIAN__ +#define VSX_HI 0 +#define VSX_LO 1 +#else +#define VSX_HI 1 +#define VSX_LO 0 +#endif + if (!ret) { if (flags & U) regs->gpr[areg] = regs->dar; /* Splat load copies the same data to top and bottom 8 bytes */ if (flags & SPLT) - lptr[1] = lptr[0]; - /* For 8 byte loads, zero the top 8 bytes */ + lptr[VSX_LO] = lptr[VSX_HI]; + /* For 8 byte loads, zero the low 8 bytes */ else if (!(flags & ST) && (8 == length)) - lptr[1] = 0; + lptr[VSX_LO] = 0; } else return -EFAULT; @@ -710,18 +746,28 @@ int fix_alignment(struct pt_regs *regs) unsigned int dsisr; unsigned char __user *addr; unsigned long p, swiz; - int ret, t; - union { + int ret, i; + union data { u64 ll; double dd; unsigned char v[8]; struct { +#ifdef __LITTLE_ENDIAN__ + int low32; + unsigned hi32; +#else unsigned hi32; int low32; +#endif } x32; struct { +#ifdef __LITTLE_ENDIAN__ + short low16; + unsigned char hi48[6]; +#else unsigned char hi48[6]; short low16; +#endif } x16; } data; @@ -780,8 +826,9 @@ int fix_alignment(struct pt_regs *regs) /* Byteswap little endian loads and stores */ swiz = 0; - if (regs->msr & MSR_LE) { + if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { flags ^= SW; +#ifdef __BIG_ENDIAN__ /* * So-called "PowerPC little endian" mode works by * swizzling addresses rather than by actually doing @@ -794,6 +841,7 @@ int fix_alignment(struct pt_regs *regs) */ if (cpu_has_feature(CPU_FTR_PPC_LE)) swiz = 7; +#endif } /* DAR has the operand effective address */ @@ -818,7 +866,7 @@ int fix_alignment(struct pt_regs *regs) elsize = 8; flags = 0; - if (regs->msr & MSR_LE) + if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) flags |= SW; if (instruction & 0x100) flags |= ST; @@ -878,32 +926,36 @@ int fix_alignment(struct pt_regs *regs) * get it from register values */ if (!(flags & ST)) { - data.ll = 0; - ret = 0; - p = (unsigned long) addr; + unsigned int start = 0; + switch (nb) { - case 8: - ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++)); case 4: - ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++)); + start = offsetof(union data, x32.low32); + break; case 2: - ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++)); - if (unlikely(ret)) - return -EFAULT; + start = offsetof(union data, x16.low16); + break; } + + data.ll = 0; + ret = 0; + p = (unsigned long)addr; + + for (i = 0; i < nb; i++) + ret |= __get_user_inatomic(data.v[start + i], + SWIZ_PTR(p++)); + + if (unlikely(ret)) + return -EFAULT; + } else if (flags & F) { - data.dd = current->thread.TS_FPR(reg); + data.ll = current->thread.TS_FPR(reg); if (flags & S) { /* Single-precision FP store requires conversion... 
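Editor's note: the reworked "union data" in fix_alignment() above makes the same trick portable. The narrow members are declared in an endianness-dependent order so that x32.low32 and x16.low16 always overlay the low-order bytes of ll, which lets the load/store loops compute a byte offset with offsetof() instead of hard-coding v[4] or v[6]. A stand-alone sketch mirroring that union (user-space C; the test bytes are made up):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    union data {
        uint64_t ll;
        uint8_t v[8];
        struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            int32_t low32;
            uint32_t hi32;
    #else
            uint32_t hi32;
            int32_t low32;
    #endif
        } x32;
    };

    int main(void)
    {
        union data d = { .ll = 0 };
        size_t start = offsetof(union data, x32.low32);  /* 0 on LE, 4 on BE */

        /* Depositing 4 bytes at v[start..start+3] writes the low 32 bits
         * of d.ll on either endianness. */
        for (size_t i = 0; i < 4; i++)
            d.v[start + i] = (uint8_t)(0x11 * (i + 1));

        printf("start=%zu ll=%#llx low32=%#x\n",
               start, (unsigned long long)d.ll, (unsigned)d.x32.low32);
        return 0;
    }

The diff continues below.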
*/ #ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); - cvt_df(&data.dd, (float *)&data.v[4]); + cvt_df(&data.dd, (float *)&data.x32.low32); preempt_enable(); #else return 0; @@ -915,17 +967,13 @@ int fix_alignment(struct pt_regs *regs) if (flags & SW) { switch (nb) { case 8: - SWAP(data.v[0], data.v[7]); - SWAP(data.v[1], data.v[6]); - SWAP(data.v[2], data.v[5]); - SWAP(data.v[3], data.v[4]); + data.ll = swab64(data.ll); break; case 4: - SWAP(data.v[4], data.v[7]); - SWAP(data.v[5], data.v[6]); + data.x32.low32 = swab32(data.x32.low32); break; case 2: - SWAP(data.v[6], data.v[7]); + data.x16.low16 = swab16(data.x16.low16); break; } } @@ -947,7 +995,7 @@ int fix_alignment(struct pt_regs *regs) #ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); - cvt_fd((float *)&data.v[4], &data.dd); + cvt_fd((float *)&data.x32.low32, &data.dd); preempt_enable(); #else return 0; @@ -957,25 +1005,28 @@ int fix_alignment(struct pt_regs *regs) /* Store result to memory or update registers */ if (flags & ST) { - ret = 0; - p = (unsigned long) addr; + unsigned int start = 0; + switch (nb) { - case 8: - ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++)); case 4: - ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++)); + start = offsetof(union data, x32.low32); + break; case 2: - ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++)); + start = offsetof(union data, x16.low16); + break; } + + ret = 0; + p = (unsigned long)addr; + + for (i = 0; i < nb; i++) + ret |= __put_user_inatomic(data.v[start + i], + SWIZ_PTR(p++)); + if (unlikely(ret)) return -EFAULT; } else if (flags & F) - current->thread.TS_FPR(reg) = data.dd; + current->thread.TS_FPR(reg) = data.ll; else regs->gpr[reg] = data.ll; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 479b036d36d7..2ea5cc033ec8 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -90,16 +90,17 @@ int main(void) DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); #endif DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); - DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); + DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); + DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area)); + DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); #ifdef CONFIG_ALTIVEC - DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); + DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); + DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area)); DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); + DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX - DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr)); DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 @@ -143,20 +144,12 @@ int main(void) DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); DEFINE(THREAD_TM_DSCR, offsetof(struct 
thread_struct, tm_dscr)); DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); - DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, - transact_vr[0])); - DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct, - transact_vscr)); + DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct, + transact_vr)); DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct, transact_vrsave)); - DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct, - transact_fpr[0])); - DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct, - transact_fpscr)); -#ifdef CONFIG_VSX - DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct, - transact_fpr[0])); -#endif + DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct, + transact_fp)); /* Local pt_regs on stack for Transactional Memory funcs. */ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 55593ee2d5aa..671302065347 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -189,14 +189,13 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) } /* If PCI-E capable, dump PCI-E cap 10, and the AER */ - cap = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (cap) { + if (pci_is_pcie(dev)) { n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); printk(KERN_WARNING "EEH: PCI-E capabilities and status follow:\n"); for (i=0; i<=8; i++) { - eeh_ops->read_config(dn, cap+4*i, 4, &cfg); + eeh_ops->read_config(dn, dev->pcie_cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); } @@ -327,11 +326,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) /* Isolate the PHB and send event */ eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); eeh_serialize_unlock(flags); - eeh_send_failure_event(phb_pe); pr_err("EEH: PHB#%x failure detected\n", phb_pe->phb->global_number); dump_stack(); + eeh_send_failure_event(phb_pe); return 1; out: @@ -454,8 +453,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev) eeh_pe_state_mark(pe, EEH_PE_ISOLATED); eeh_serialize_unlock(flags); - eeh_send_failure_event(pe); - /* Most EEH events are due to device driver bugs. Having * a stack trace will help the device-driver authors figure * out what happened. So print that out. @@ -464,6 +461,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev) pe->addr, pe->phb->global_number); dump_stack(); + eeh_send_failure_event(pe); + return 1; dn_unlock: diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index c04cdf70d487..bbfb0294b354 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -673,9 +673,7 @@ _GLOBAL(ret_from_except_lite) resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ - CURRENT_THREAD_INFO(r9, r1) - ld r8,TI_FLAGS(r9) - andis. r8,r8,_TIF_EMULATE_STACK_STORE@h + andis. r8,r4,_TIF_EMULATE_STACK_STORE@h beq+ 1f addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ @@ -820,6 +818,12 @@ fast_exception_return: andi. r0,r3,MSR_RI beq- unrecov_restore + /* Load PPR from thread struct before we clear MSR:RI */ +BEGIN_FTR_SECTION + ld r2,PACACURRENT(r13) + ld r2,TASKTHREADPPR(r2) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + /* * Clear RI before restoring r13. If we are returning to * userspace and we take an exception after restoring r13, @@ -840,8 +844,10 @@ fast_exception_return: */ andi. 
r0,r3,MSR_PR beq 1f +BEGIN_FTR_SECTION + mtspr SPRN_PPR,r2 /* Restore PPR */ +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r2, r4) - RESTORE_PPR(r2, r4) REST_GPR(13, r1) 1: mtspr SPRN_SRR1,r3 @@ -1017,7 +1023,7 @@ _GLOBAL(enter_rtas) li r9,1 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) - ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI + ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE andc r6,r0,r9 sync /* disable interrupts so SRR0/1 */ mtmsrd r0 /* don't get trashed */ @@ -1032,6 +1038,8 @@ _GLOBAL(enter_rtas) b . /* prevent speculative execution */ _STATIC(rtas_return_loc) + FIXUP_ENDIAN + /* relocation is off at this point */ GET_PACA(r4) clrldi r4,r4,2 /* convert to realmode address */ @@ -1103,28 +1111,30 @@ _GLOBAL(enter_prom) std r10,_CCR(r1) std r11,_MSR(r1) - /* Get the PROM entrypoint */ - mtlr r4 + /* Put PROM address in SRR0 */ + mtsrr0 r4 - /* Switch MSR to 32 bits mode + /* Setup our trampoline return addr in LR */ + bcl 20,31,$+4 +0: mflr r4 + addi r4,r4,(1f - 0b) + mtlr r4 + + /* Prepare a 32-bit mode big endian MSR */ #ifdef CONFIG_PPC_BOOK3E rlwinm r11,r11,0,1,31 - mtmsr r11 + mtsrr1 r11 + rfi #else /* CONFIG_PPC_BOOK3E */ - mfmsr r11 - li r12,1 - rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) - andc r11,r11,r12 - li r12,1 - rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) - andc r11,r11,r12 - mtmsrd r11 + LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) + andc r11,r11,r12 + mtsrr1 r11 + rfid #endif /* CONFIG_PPC_BOOK3E */ - isync - /* Enter PROM here... */ - blrl +1: /* Return from OF */ + FIXUP_ENDIAN /* Just make sure that r1 top 32 bits didn't get * corrupt by OF diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 6300c13bbde4..7898be90f2dc 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -18,6 +18,7 @@ */ #include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/epapr_hcalls.h> #include <asm/cacheflush.h> #include <asm/code-patching.h> diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 2d067049db27..e7751561fd1d 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -399,7 +399,7 @@ interrupt_end_book3e: /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); - NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, + NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL, PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... 
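Editor's note: the enter_rtas change earlier in this hunk adds MSR_LE to the set of bits cleared before entering firmware, because RTAS executes big-endian; a little-endian kernel must flip endianness on the way in, and FIXUP_ENDIAN recovers it on the way back. The mask logic, modelled in C as a sketch (the bit positions follow the kernel's MSR_*_LG values as I recall them; treat the exact numbers as assumptions, not the real asm/reg.h definitions):

    #include <stdint.h>

    #define MSR_SF  (1ULL << 63)  /* 64-bit mode */
    #define MSR_FP  (1ULL << 13)  /* floating point available */
    #define MSR_FE0 (1ULL << 11)
    #define MSR_FE1 (1ULL << 8)
    #define MSR_IR  (1ULL << 5)   /* instruction relocation */
    #define MSR_DR  (1ULL << 4)   /* data relocation */
    #define MSR_RI  (1ULL << 1)   /* recoverable interrupt */
    #define MSR_LE  (1ULL << 0)   /* little-endian mode */

    /* enter_rtas builds the RTAS-entry MSR by clearing these bits from
     * the current MSR (the andc in the hunk above); adding MSR_LE to
     * the mask is what forces big-endian execution on an LE kernel. */
    static uint64_t rtas_msr(uint64_t msr)
    {
        uint64_t clear = MSR_SF | MSR_IR | MSR_DR | MSR_FP |
                         MSR_FE0 | MSR_FE1 | MSR_RI | MSR_LE;
        return msr & ~clear;
    }

The diff continues below.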
*/ EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP) @@ -421,7 +421,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* AltiVec Assist */ START_EXCEPTION(altivec_assist); - NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST, + NORMAL_EXCEPTION_PROLOG(0x220, + BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE) bl .save_nvgprs @@ -607,6 +608,7 @@ kernel_dbg_exc: NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) + CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD bl .performance_monitor_exception b .ret_from_except_lite diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index caeaabf11a2f..f7f5b8bed68f 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: REST_32VSRS(n,c,base); \ 3: -#define __REST_32FPVSRS_TRANSACT(n,c,base) \ -BEGIN_FTR_SECTION \ - b 2f; \ -END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ - REST_32FPRS_TRANSACT(n,base); \ - b 3f; \ -2: REST_32VSRS_TRANSACT(n,c,base); \ -3: - #define __SAVE_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ @@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 3: #else #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) -#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base) #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) #endif #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) -#define REST_32FPVSRS_TRANSACT(n,c,base) \ - __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base) #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -/* - * Wrapper to call load_up_fpu from C. - * void do_load_up_fpu(struct pt_regs *regs); - */ -_GLOBAL(do_load_up_fpu) - mflr r0 - std r0, 16(r1) - stdu r1, -112(r1) - - subi r6, r3, STACK_FRAME_OVERHEAD - /* load_up_fpu expects r12=MSR, r13=PACA, and returns - * with r12 = new MSR. - */ - ld r12,_MSR(r6) - GET_PACA(r13) - - bl load_up_fpu - std r12,_MSR(r6) - - ld r0, 112+16(r1) - addi r1, r1, 112 - mtlr r0 - blr - - /* void do_load_up_transact_fpu(struct thread_struct *thread) * * This is similar to load_up_fpu but for the transactional version of the FP @@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) SYNC MTMSRD(r5) - lfd fr0,THREAD_TRANSACT_FPSCR(r3) + addi r7,r3,THREAD_TRANSACT_FPSTATE + lfd fr0,FPSTATE_FPSCR(r7) MTFSF_L(fr0) - REST_32FPVSRS_TRANSACT(0, R4, R3) + REST_32FPVSRS(0, R4, R7) /* FP/VSX off again */ MTMSRD(r6) @@ -117,11 +81,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* + * Load state from memory into FP registers including FPSCR. + * Assumes the caller has enabled FP in the MSR. + */ +_GLOBAL(load_fp_state) + lfd fr0,FPSTATE_FPSCR(r3) + MTFSF_L(fr0) + REST_32FPVSRS(0, R4, R3) + blr + +/* + * Store FP state into memory, including FPSCR + * Assumes the caller has enabled FP in the MSR. + */ +_GLOBAL(store_fp_state) + SAVE_32FPVSRS(0, R4, R3) + mffs fr0 + stfd fr0,FPSTATE_FPSCR(r3) + blr + +/* * This task wants to use the FPU now. * On UP, disable FP for the task which had the FPU previously, * and save its floating-point registers in its thread_struct. * Load up this task's FP registers from its thread_struct, * enable the FPU for the current task and return to the task. + * Note that on 32-bit this can only use registers that will be + * restored by fast_exception_return, i.e. r3 - r6, r10 and r11. 
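Editor's note: the new load_fp_state/store_fp_state entry points above replace the removed do_load_up_fpu-style wrappers with a simple C-callable pair (their exports appear in the ppc_ksyms.c hunk later in this diff). A sketch of the C-level contract, assumed from this series rather than quoted from the headers:

    /* Both are assembly routines; the caller passes a pointer to the
     * state block in r3 per the ABI and must already have FP enabled
     * in the MSR, as the comments in the hunk above require. */
    struct thread_fp_state;

    extern void load_fp_state(struct thread_fp_state *fp);   /* memory -> f0..f31 + FPSCR */
    extern void store_fp_state(struct thread_fp_state *fp);  /* f0..f31 + FPSCR -> memory */

The diff continues below.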
*/ _GLOBAL(load_up_fpu) mfmsr r5 @@ -147,9 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) beq 1f toreal(r4) addi r4,r4,THREAD /* want last_task_used_math->thread */ - SAVE_32FPVSRS(0, R5, R4) + addi r10,r4,THREAD_FPSTATE + SAVE_32FPVSRS(0, R5, R10) mffs fr0 - stfd fr0,THREAD_FPSCR(r4) + stfd fr0,FPSTATE_FPSCR(r10) PPC_LL r5,PT_REGS(r4) toreal(r5) PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) @@ -160,7 +147,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif /* CONFIG_SMP */ /* enable use of FP after return */ #ifdef CONFIG_PPC32 - mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ + mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ lwz r4,THREAD_FPEXC_MODE(r5) ori r9,r9,MSR_FP /* enable FP for current */ or r9,r9,r4 @@ -172,9 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) or r12,r12,r4 std r12,_MSR(r1) #endif - lfd fr0,THREAD_FPSCR(r5) + addi r10,r5,THREAD_FPSTATE + lfd fr0,FPSTATE_FPSCR(r10) MTFSF_L(fr0) - REST_32FPVSRS(0, R4, R5) + REST_32FPVSRS(0, R4, R10) #ifndef CONFIG_SMP subi r4,r5,THREAD fromreal(r4) @@ -206,11 +194,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) PPC_LCMPI 0,r3,0 beqlr- /* if no previous owner, done */ addi r3,r3,THREAD /* want THREAD of task */ + PPC_LL r6,THREAD_FPSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) - PPC_LCMPI 0,r5,0 - SAVE_32FPVSRS(0, R4 ,R3) + PPC_LCMPI 0,r6,0 + bne 2f + addi r6,r3,THREAD_FPSTATE +2: PPC_LCMPI 0,r5,0 + SAVE_32FPVSRS(0, R4, R6) mffs fr0 - stfd fr0,THREAD_FPSCR(r3) + stfd fr0,FPSTATE_FPSCR(r6) beq 1f PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) li r3,MSR_FP|MSR_FE0|MSR_FE1 diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 1fb78561096a..9b27b293a922 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -174,7 +174,11 @@ __ftrace_make_nop(struct module *mod, pr_devel(" %08x %08x\n", jmp[0], jmp[1]); +#ifdef __LITTLE_ENDIAN__ + ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; +#else ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; +#endif /* This should match what was called */ if (ptr != ppc_function_entry((void *)addr)) { diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 3d11d8038dee..2ae41aba4053 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -68,6 +68,7 @@ _stext: _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION + FIXUP_ENDIAN b .__start_initialization_multiplatform END_FTR_SECTION(0, 1) @@ -115,6 +116,7 @@ __run_at_load: */ .globl __secondary_hold __secondary_hold: + FIXUP_ENDIAN #ifndef CONFIG_PPC_BOOK3E mfmsr r24 ori r24,r24,MSR_RI @@ -205,6 +207,7 @@ _GLOBAL(generic_secondary_thread_init) * as SCOM before entry). 
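Editor's note: the ftrace.c hunk above fixes __ftrace_make_nop() for little-endian. The trampoline stores a 64-bit target address as two 32-bit words, and which word holds the high half follows the kernel's byte order. A stand-alone model (user-space C; the sample address is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the #ifdef added in the hunk above. */
    static uint64_t descriptor_addr(const uint32_t jmp[2])
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return ((uint64_t)jmp[1] << 32) + jmp[0];
    #else
        return ((uint64_t)jmp[0] << 32) + jmp[1];
    #endif
    }

    int main(void)
    {
        uint64_t addr = 0xc000000000a1b2c8ULL;  /* made-up kernel address */
        uint32_t words[2];

        memcpy(words, &addr, sizeof(words));    /* stored as one native 64-bit value */
        printf("recovered %#llx\n", (unsigned long long)descriptor_addr(words));
        return 0;
    }

The diff continues below.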
*/ _GLOBAL(generic_secondary_smp_init) + FIXUP_ENDIAN mr r24,r3 mr r25,r4 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1b92a97b1b04..7ee876d2adb5 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -858,6 +858,9 @@ initial_mmu: addis r11, r11, 0x0080 /* Add 8M */ mtspr SPRN_MD_RPN, r11 + addi r10, r10, 0x0100 + mtspr SPRN_MD_CTR, r10 + addis r8, r8, 0x0080 /* Add 8M */ mtspr SPRN_MD_EPN, r8 mtspr SPRN_MD_TWC, r9 diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 289afaffbbb5..f45726a1d963 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -555,27 +555,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #ifdef CONFIG_SPE /* SPE Unavailable */ START_EXCEPTION(SPEUnavailable) - NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL) + NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL) beq 1f bl load_up_spe b fast_exception_return 1: addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_EE_LITE(0x2010, KernelSPE) #else - EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \ + EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \ unknown_exception, EXC_XFER_EE) #endif /* CONFIG_SPE */ /* SPE Floating Point Data */ #ifdef CONFIG_SPE - EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \ - SPEFloatingPointException, EXC_XFER_EE); + EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, + SPEFloatingPointException, EXC_XFER_EE) /* SPE Floating Point Round */ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ SPEFloatingPointRoundException, EXC_XFER_EE) #else - EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \ + EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ unknown_exception, EXC_XFER_EE) diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 16a7c2326d48..1114d13ac19f 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -292,6 +292,7 @@ out: return rc; return count; } +static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe); static ssize_t ibmebus_store_remove(struct bus_type *bus, const char *buf, size_t count) @@ -317,13 +318,14 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, return -ENODEV; } } +static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove); - -static struct bus_attribute ibmebus_bus_attrs[] = { - __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), - __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), - __ATTR_NULL +static struct attribute *ibmbus_bus_attrs[] = { + &bus_attr_probe.attr, + &bus_attr_remove.attr, + NULL, }; +ATTRIBUTE_GROUPS(ibmbus_bus); static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) { @@ -713,7 +715,7 @@ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { struct bus_type ibmebus_bus_type = { .name = "ibmebus", .uevent = of_device_uevent_modalias, - .bus_attrs = ibmebus_bus_attrs, + .bus_groups = ibmbus_bus_groups, .match = ibmebus_bus_bus_match, .probe = ibmebus_bus_device_probe, .remove = ibmebus_bus_device_remove, diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c7cb8c232d2f..ba0165615215 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -594,7 +594,7 @@ void irq_ctx_init(void) } } -static inline void do_softirq_onstack(void) +void do_softirq_own_stack(void) { struct thread_info *curtp, *irqtp; @@ -612,21 +612,6 @@ static inline void do_softirq_onstack(void) 
set_bits(irqtp->flags, &curtp->flags); } -void do_softirq(void) -{ - unsigned long flags; - - if (in_interrupt()) - return; - - local_irq_save(flags); - - if (local_softirq_pending()) - do_softirq_onstack(); - - local_irq_restore(flags); -} - irq_hw_number_t virq_to_hw(unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index c1eef241017a..83e89d310734 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -151,15 +151,16 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) return 1; } +static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info); static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; - struct thread_info *backup_current_thread_info; + struct thread_info *backup_current_thread_info = + &__get_cpu_var(kgdb_thread_info); if (user_mode(regs)) return 0; - backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL); /* * On Book E and perhaps other processors, singlestep is handled on * the critical exception stack. This causes current_thread_info() @@ -185,7 +186,6 @@ static int kgdb_singlestep(struct pt_regs *regs) /* Restore current_thread_info lastly. */ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); - kfree(backup_current_thread_info); return 1; } diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 22e88dd2f34a..40bd7bd4e19a 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -35,7 +35,7 @@ static struct legacy_serial_info { phys_addr_t taddr; } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; -static struct __initdata of_device_id legacy_serial_parents[] = { +static struct of_device_id legacy_serial_parents[] __initdata = { {.type = "soc",}, {.type = "tsi-bridge",}, {.type = "opb", }, diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 611acdf30096..be4e6d648f60 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -312,7 +312,7 @@ static union thread_union kexec_stack __init_task_data = */ struct paca_struct kexec_paca; -/* Our assembly helper, in kexec_stub.S */ +/* Our assembly helper, in misc_64.S */ extern void kexec_sequence(void *newstack, unsigned long start, void *image, void *control, void (*clear_all)(void)) __noreturn; diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 2b0ad9845363..e47d268727a4 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -659,6 +659,20 @@ _GLOBAL(__lshrdi3) blr /* + * 64-bit comparison: __cmpdi2(s64 a, s64 b) + * Returns 0 if a < b, 1 if a == b, 2 if a > b. + */ +_GLOBAL(__cmpdi2) + cmpw r3,r5 + li r3,1 + bne 1f + cmplw r4,r6 + beqlr +1: li r3,0 + bltlr + li r3,2 + blr +/* * 64-bit comparison: __ucmpdi2(u64 a, u64 b) * Returns 0 if a < b, 1 if a == b, 2 if a > b. 
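Editor's note: a C reference model of the new __cmpdi2 above, useful for convincing yourself the branch structure is right: compare the high 32-bit halves as signed, and only on equality compare the low halves as unsigned, exactly what the cmpw/cmplw pair does. This is a sketch for checking semantics, not the libgcc implementation:

    #include <assert.h>
    #include <stdint.h>

    /* Returns 0 if a < b, 1 if a == b, 2 if a > b. */
    static int cmpdi2_model(int64_t a, int64_t b)
    {
        int32_t ah = (int32_t)(a >> 32), bh = (int32_t)(b >> 32);
        uint32_t al = (uint32_t)a, bl = (uint32_t)b;

        if (ah != bh)                 /* cmpw: signed high-word compare */
            return ah < bh ? 0 : 2;
        if (al != bl)                 /* cmplw: unsigned low-word compare */
            return al < bl ? 0 : 2;
        return 1;
    }

    int main(void)
    {
        assert(cmpdi2_model(-1, 0) == 0);
        assert(cmpdi2_model(5, 5) == 1);
        assert(cmpdi2_model(1LL << 40, 1) == 2);
        return 0;
    }

The diff continues below.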
*/ diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 2d275707f419..9547381b631a 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -25,8 +25,7 @@ #include <asm/uaccess.h> #include <asm/firmware.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> LIST_HEAD(module_bug_list); diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 2e3200ca485f..6cff040bf456 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -26,8 +26,7 @@ #include <linux/cache.h> #include <linux/bug.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> #if 0 #define DEBUGP printk diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 6ee59a0eb268..12664c130d73 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -26,8 +26,7 @@ #include <asm/firmware.h> #include <asm/code-patching.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> /* FIXME: We don't do .init separately. To do this, we'd need to have a separate r2 value in the init and core section, and stub between @@ -62,6 +61,16 @@ struct ppc64_stub_entry r2) into the stub. */ static struct ppc64_stub_entry ppc64_stub = { .jump = { +#ifdef __LITTLE_ENDIAN__ + 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ + 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ + /* Save current r2 value in magic place on the stack. */ + 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ + 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ + 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ + 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ + 0x20, 0x04, 0x80, 0x4e /* bctr */ +#else 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ /* Save current r2 value in magic place on the stack. */ @@ -70,6 +79,7 @@ static struct ppc64_stub_entry ppc64_stub = 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ 0x4e, 0x80, 0x04, 0x20 /* bctr */ +#endif } }; /* Count how many different 24-bit relocations (different symbol, @@ -269,8 +279,13 @@ static inline int create_stub(Elf64_Shdr *sechdrs, *entry = ppc64_stub; +#ifdef __LITTLE_ENDIAN__ + loc1 = (Elf64_Half *)&entry->jump[0]; + loc2 = (Elf64_Half *)&entry->jump[4]; +#else loc1 = (Elf64_Half *)&entry->jump[2]; loc2 = (Elf64_Half *)&entry->jump[6]; +#endif /* Stub uses address relative to r2. 
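Editor's note: the duplicated stub template above is the same five instructions twice, once per byte order. A 32-bit PowerPC opcode such as addis r12,r2,0 (0x3d820000) is laid down as 3d 82 00 00 on a big-endian kernel and 00 00 82 3d on a little-endian one, so the two byte tables are per-word mirror images. That also explains the create_stub() change that follows: the 16-bit immediates sit in the low half of each word, so their Elf64_Half patch offsets move from bytes 2 and 6 to bytes 0 and 4. A quick stand-alone check (user-space C):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t insn = 0x3d820000u;  /* addis r12,r2,0 */
        uint8_t b[4];

        memcpy(b, &insn, sizeof(b));
        /* Prints "3d 82 00 00" on BE, "00 00 82 3d" on LE, matching the
         * two jump[] tables in the hunk above. */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }

The diff continues below.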
*/ reladdr = (unsigned long)entry - my_r2(sechdrs, me); diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 8213ee1eb05a..fd82c289ab1c 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -223,9 +223,13 @@ static int __init nvram_write_header(struct nvram_partition * part) { loff_t tmp_index; int rc; - + struct nvram_header phead; + + memcpy(&phead, &part->header, NVRAM_HEADER_LEN); + phead.length = cpu_to_be16(phead.length); + tmp_index = part->index; - rc = ppc_md.nvram_write((char *)&part->header, NVRAM_HEADER_LEN, &tmp_index); + rc = ppc_md.nvram_write((char *)&phead, NVRAM_HEADER_LEN, &tmp_index); return rc; } @@ -505,6 +509,8 @@ int __init nvram_scan_partitions(void) memcpy(&phead, header, NVRAM_HEADER_LEN); + phead.length = be16_to_cpu(phead.length); + err = 0; c_sum = nvram_checksum(&phead); if (c_sum != phead.checksum) { diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 3fc16e3beb9f..0620eaaaad45 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -46,7 +46,7 @@ struct lppaca lppaca[] = { static struct lppaca *extra_lppacas; static long __initdata lppaca_size; -static void allocate_lppacas(int nr_cpus, unsigned long limit) +static void __init allocate_lppacas(int nr_cpus, unsigned long limit) { if (nr_cpus <= NR_LPPACAS) return; @@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit) PAGE_SIZE, limit)); } -static struct lppaca *new_lppaca(int cpu) +static struct lppaca * __init new_lppaca(int cpu) { struct lppaca *lp; @@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu) return lp; } -static void free_lppacas(void) +static void __init free_lppacas(void) { long new_size = 0, nr; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 905a24bb7acc..a1e3e40ca3fd 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -228,7 +228,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev) */ static int pci_read_irq_line(struct pci_dev *pci_dev) { - struct of_irq oirq; + struct of_phandle_args oirq; unsigned int virq; pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); @@ -237,7 +237,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) memset(&oirq, 0xff, sizeof(oirq)); #endif /* Try to get a mapping from the device-tree */ - if (of_irq_map_pci(pci_dev, &oirq)) { + if (of_irq_parse_pci(pci_dev, &oirq)) { u8 line, pin; /* If that fails, lets fallback to what is in the config @@ -263,11 +263,10 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) 
on %s\n", - oirq.size, oirq.specifier[0], oirq.specifier[1], - of_node_full_name(oirq.controller)); + oirq.args_count, oirq.args[0], oirq.args[1], + of_node_full_name(oirq.np)); - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); + virq = irq_create_of_mapping(&oirq); } if(virq == NO_IRQ) { pr_debug(" Failed to map !\n"); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 4368ec6fdc8c..ac0b034f9ae0 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -302,7 +302,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, struct device_node *dn) { struct pci_dev *dev = NULL; - const u32 *reg; + const __be32 *reg; int reglen, devfn; pr_debug(" * %s\n", dn->full_name); @@ -312,7 +312,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, reg = of_get_property(dn, "reg", ®len); if (reg == NULL || reglen < 20) return NULL; - devfn = (reg[0] >> 8) & 0xff; + devfn = (of_read_number(reg, 1) >> 8) & 0xff; /* Check if the PCI device is already there */ dev = pci_get_slot(bus, devfn); diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 21646dbe1bb3..3bd77edd7610 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,10 +79,12 @@ EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); +#ifndef CONFIG_GENERIC_CSUM EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(ip_fast_csum); EXPORT_SYMBOL(csum_tcpudp_magic); +#endif EXPORT_SYMBOL(__copy_tofrom_user); EXPORT_SYMBOL(__clear_user); @@ -98,9 +100,13 @@ EXPORT_SYMBOL(start_thread); #ifdef CONFIG_PPC_FPU EXPORT_SYMBOL(giveup_fpu); +EXPORT_SYMBOL(load_fp_state); +EXPORT_SYMBOL(store_fp_state); #endif #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL(giveup_altivec); +EXPORT_SYMBOL(load_vr_state); +EXPORT_SYMBOL(store_vr_state); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX EXPORT_SYMBOL(giveup_vsx); @@ -143,10 +149,14 @@ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); int __ucmpdi2(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ucmpdi2); +int __cmpdi2(long long, long long); +EXPORT_SYMBOL(__cmpdi2); #endif long long __bswapdi2(long long); EXPORT_SYMBOL(__bswapdi2); +#ifdef __BIG_ENDIAN__ EXPORT_SYMBOL(memcpy); +#endif EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memcmp); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ec5ae55ba36a..75c2d1009985 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -597,12 +597,13 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *new) { struct thread_struct *new_thread, *old_thread; - unsigned long flags; struct task_struct *last; #ifdef CONFIG_PPC_BOOK3S_64 struct ppc64_tlb_batch *batch; #endif + WARN_ON(!irqs_disabled()); + /* Back up the TAR across context switches. * Note that the TAR is not available for use in the kernel. 
(To * provide this, the TAR should be backed up/restored on exception @@ -722,8 +723,6 @@ struct task_struct *__switch_to(struct task_struct *prev, } #endif /* CONFIG_PPC_BOOK3S_64 */ - local_irq_save(flags); - /* * We can't take a PMU exception inside _switch() since there is a * window where the kernel stack SLB and the kernel stack are out @@ -743,8 +742,6 @@ struct task_struct *__switch_to(struct task_struct *prev, } #endif /* CONFIG_PPC_BOOK3S_64 */ - local_irq_restore(flags); - return last; } @@ -1009,6 +1006,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.ptrace_bps[0] = NULL; #endif + p->thread.fp_save_area = NULL; +#ifdef CONFIG_ALTIVEC + p->thread.vr_save_area = NULL; +#endif + #ifdef CONFIG_PPC_STD_MMU_64 if (mmu_has_feature(MMU_FTR_SLB)) { unsigned long sp_vsid; @@ -1114,12 +1116,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) #ifdef CONFIG_VSX current->thread.used_vsr = 0; #endif - memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); - current->thread.fpscr.val = 0; + memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); + current->thread.fp_save_area = NULL; #ifdef CONFIG_ALTIVEC - memset(current->thread.vr, 0, sizeof(current->thread.vr)); - memset(¤t->thread.vscr, 0, sizeof(current->thread.vscr)); - current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ + memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); + current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ + current->thread.vr_save_area = NULL; current->thread.vrsave = 0; current->thread.used_vr = 0; #endif /* CONFIG_ALTIVEC */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index b7634ce41dbc..f3a47098fb8e 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -546,15 +546,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) memblock_add(base, size); } -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - static void __init early_reserve_mem_dt(void) { unsigned long i, len, dt_root; @@ -761,37 +752,6 @@ void __init early_init_devtree(void *params) *******/ /** - * of_find_next_cache_node - Find a node's subsidiary cache - * @np: node of type "cpu" or "cache" - * - * Returns a node pointer with refcount incremented, use - * of_node_put() on it when done. Caller should hold a reference - * to np. - */ -struct device_node *of_find_next_cache_node(struct device_node *np) -{ - struct device_node *child; - const phandle *handle; - - handle = of_get_property(np, "l2-cache", NULL); - if (!handle) - handle = of_get_property(np, "next-level-cache", NULL); - - if (handle) - return of_find_node_by_phandle(*handle); - - /* OF on pmac has nodes instead of properties named "l2-cache" - * beneath CPU nodes. 
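Editor's note: most hunks in this diff replace the old thread.fpr/thread.vr/transact_* arrays with the new fp_state/vr_state/transact_fp/transact_vr containers. Their approximate shape, reconstructed from the offsets and BUILD_BUG_ONs used elsewhere in this diff (a sketch, not a verbatim copy of the headers; TS_FPRWIDTH = 2 assumes CONFIG_VSX=y):

    #include <stdint.h>

    typedef struct { uint32_t u[4]; } __attribute__((aligned(16))) vector128;

    #define TS_FPRWIDTH 2   /* assumption: 2 doublewords per slot with VSX */

    struct thread_fp_state {
        uint64_t fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
        uint64_t fpscr;   /* immediately follows fpr[32][0], per the
                           * BUILD_BUG_ONs in the ptrace.c hunks */
    };

    struct thread_vr_state {
        vector128 vr[32] __attribute__((aligned(16)));
        vector128 vscr;   /* immediately follows vr[32] */
    };

The diff continues below.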
- */ - if (!strcmp(np->type, "cpu")) - for_each_child_of_node(np, child) - if (!strcmp(child->type, "cache")) - return child; - - return NULL; -} - -/** * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device * @np: device node of the device * diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 5fe2842e8bab..cb64a6e1dc51 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -858,7 +858,8 @@ static void __init prom_send_capabilities(void) { ihandle root; prom_arg_t ret; - __be32 *cores; + u32 cores; + unsigned char *ptcores; root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { @@ -868,15 +869,30 @@ static void __init prom_send_capabilities(void) * (we assume this is the same for all cores) and use it to * divide NR_CPUS. */ - cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; - if (be32_to_cpup(cores) != NR_CPUS) { + + /* The core value may start at an odd address. If such a word + * access is made at a cache line boundary, this leads to an + * exception which may not be handled at this time. + * Forcing a per byte access to avoid exception. + */ + ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; + cores = 0; + cores |= ptcores[0] << 24; + cores |= ptcores[1] << 16; + cores |= ptcores[2] << 8; + cores |= ptcores[3]; + if (cores != NR_CPUS) { prom_printf("WARNING ! " "ibm_architecture_vec structure inconsistent: %lu!\n", - be32_to_cpup(cores)); + cores); } else { - *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads())); + cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", - be32_to_cpup(cores), NR_CPUS); + cores, NR_CPUS); + ptcores[0] = (cores >> 24) & 0xff; + ptcores[1] = (cores >> 16) & 0xff; + ptcores[2] = (cores >> 8) & 0xff; + ptcores[3] = cores & 0xff; } /* try calling the ibm,client-architecture-support method */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ddaf1780879c..75fb40498b41 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, void *kbuf, void __user *ubuf) { #ifdef CONFIG_VSX - double buf[33]; + u64 buf[33]; int i; #endif flush_fp_to_thread(target); @@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, /* copy to local buffer then write that out */ for (i = 0; i < 32 ; i++) buf[i] = target->thread.TS_FPR(i); - memcpy(&buf[32], &target->thread.fpscr, sizeof(double)); + buf[32] = target->thread.fp_state.fpscr; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); #else - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != - offsetof(struct thread_struct, TS_FPR(32))); + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != + offsetof(struct thread_fp_state, fpr[32][0])); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpr, 0, -1); + &target->thread.fp_state, 0, -1); #endif } @@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { #ifdef CONFIG_VSX - double buf[33]; + u64 buf[33]; int i; #endif flush_fp_to_thread(target); @@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, return i; for (i = 0; i < 32 ; i++) target->thread.TS_FPR(i) = buf[i]; - memcpy(&target->thread.fpscr, &buf[32], 
sizeof(double)); + target->thread.fp_state.fpscr = buf[32]; return 0; #else - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != - offsetof(struct thread_struct, TS_FPR(32))); + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != + offsetof(struct thread_fp_state, fpr[32][0])); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpr, 0, -1); + &target->thread.fp_state, 0, -1); #endif } @@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, flush_altivec_to_thread(target); - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != - offsetof(struct thread_struct, vr[32])); + BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != + offsetof(struct thread_vr_state, vr[32])); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.vr, 0, + &target->thread.vr_state, 0, 33 * sizeof(vector128)); if (!ret) { /* @@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, flush_altivec_to_thread(target); - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != - offsetof(struct thread_struct, vr[32])); + BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != + offsetof(struct thread_vr_state, vr[32])); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.vr, 0, 33 * sizeof(vector128)); + &target->thread.vr_state, 0, + 33 * sizeof(vector128)); if (!ret && count > 0) { /* * We use only the first word of vrsave. @@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - double buf[32]; + u64 buf[32]; int ret, i; flush_vsx_to_thread(target); for (i = 0; i < 32 ; i++) - buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET]; + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); @@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - double buf[32]; + u64 buf[32]; int ret,i; flush_vsx_to_thread(target); @@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); for (i = 0; i < 32 ; i++) - target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; + target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return ret; @@ -657,7 +658,7 @@ static const struct user_regset native_regsets[] = { #endif #ifdef CONFIG_SPE [REGSET_SPE] = { - .n = 35, + .core_note_type = NT_PPC_SPE, .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, @@ -894,7 +895,7 @@ void user_disable_single_step(struct task_struct *task) * And, after doing so, if all debug flags are off, turn * off DBCR0(IDM) and MSR(DE) .... Torez */ - task->thread.debug.dbcr0 &= ~DBCR0_IC; + task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT); /* * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. 
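Editor's note: the fpr_get()/fpr_set() rework above swaps double buf[33] for u64 buf[33], keeping the marshalling a pure bit copy with no FP loads or stores. With VSX, each FPR is doubleword 0 of a two-doubleword slot, and the FPSCR rides along as element 32. A model of the read side (sketch; struct thread_fp_state_model is a local stand-in for the kernel struct):

    #include <stdint.h>

    #define ELF_NFPREG 33   /* 32 FPRs + FPSCR, as in the ELF core note */

    struct thread_fp_state_model {
        uint64_t fpr[32][2];
        uint64_t fpscr;
    };

    static void fpr_get_model(const struct thread_fp_state_model *fp,
                              uint64_t buf[ELF_NFPREG])
    {
        for (int i = 0; i < 32; i++)
            buf[i] = fp->fpr[i][0];   /* TS_FPR(i): doubleword 0 of slot i */
        buf[32] = fp->fpscr;          /* appended, matching the layout check */
    }

The diff continues below.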
*/ @@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - tmp = ((unsigned long *)child->thread.fpr) - [fpidx * TS_FPRWIDTH]; + memcpy(&tmp, &child->thread.fp_state.fpr, + sizeof(long)); else - tmp = child->thread.fpscr.val; + tmp = child->thread.fp_state.fpscr; } ret = put_user(tmp, datalp); break; @@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - ((unsigned long *)child->thread.fpr) - [fpidx * TS_FPRWIDTH] = data; + memcpy(&child->thread.fp_state.fpr, &data, + sizeof(long)); else - child->thread.fpscr.val = data; + child->thread.fp_state.fpscr = data; ret = 0; } break; diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 18c7c65ea46d..f52b7db327c8 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -43,7 +43,6 @@ #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) #define FPRHALF(i) (((i) - PT_FPR0) & 1) #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) -#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) @@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ - tmp = ((unsigned int *)child->thread.fpr) + tmp = ((unsigned int *)child->thread.fp_state.fpr) [FPRINDEX(index)]; } ret = put_user((unsigned int)tmp, (u32 __user *)data); @@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ - tmp = ((u64 *)child->thread.fpr) - [FPRINDEX_3264(numReg)]; + tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0]; } else { /* register within PT_REGS struct */ unsigned long tmp2; ret = ptrace_get_reg(child, numReg, &tmp2); @@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ - ((unsigned int *)child->thread.fpr) + ((unsigned int *)child->thread.fp_state.fpr) [FPRINDEX(index)] = data; ret = 0; } @@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, u64 *tmp; flush_fp_to_thread(child); /* get 64 bit FPR ... */ - tmp = &(((u64 *)child->thread.fpr) - [FPRINDEX_3264(numReg)]); + tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0]; /* ... 
write the 32 bit part we want */ ((u32 *)tmp)[index % 2] = data; ret = 0; diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 6e7b7cdeec65..7d4c7172f38e 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -223,7 +223,7 @@ unsigned long get_phb_buid(struct device_node *phb) static int phb_set_bus_ranges(struct device_node *dev, struct pci_controller *phb) { - const int *bus_range; + const __be32 *bus_range; unsigned int len; bus_range = of_get_property(dev, "bus-range", &len); @@ -231,8 +231,8 @@ static int phb_set_bus_ranges(struct device_node *dev, return 1; } - phb->first_busno = bus_range[0]; - phb->last_busno = bus_range[1]; + phb->first_busno = be32_to_cpu(bus_range[0]); + phb->last_busno = be32_to_cpu(bus_range[1]); return 0; } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 3d261c071fc8..febc80445d25 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -62,8 +62,6 @@ #include <mm/mmu_decl.h> #include <asm/fadump.h> -#include "setup.h" - #ifdef DEBUG #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h deleted file mode 100644 index 4c67ad7fae08..000000000000 --- a/arch/powerpc/kernel/setup.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _POWERPC_KERNEL_SETUP_H -#define _POWERPC_KERNEL_SETUP_H - -void check_for_initrd(void); -void do_init_bootmem(void); -void setup_panic(void); -extern int do_early_xmon; - -#endif /* _POWERPC_KERNEL_SETUP_H */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a4bbcae72578..b903dc5cf944 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -40,8 +40,6 @@ #include <asm/mmu_context.h> #include <asm/epapr_hcalls.h> -#include "setup.h" - #define DBG(fmt...) extern void bootx_init(unsigned long r4, unsigned long phys); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 278ca93e1f28..4085aaa9478f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -68,8 +68,6 @@ #include <asm/hugetlb.h> #include <asm/epapr_hcalls.h> -#include "setup.h" - #ifdef DEBUG #define DBG(fmt...) 
udbg_printf(fmt) #else diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3f220d93c72f..749778e0a69d 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -265,27 +265,27 @@ struct rt_sigframe { unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < (ELF_NFPREG - 1) ; i++) buf[i] = task->thread.TS_FPR(i); - memcpy(&buf[i], &task->thread.fpscr, sizeof(double)); + buf[i] = task->thread.fp_state.fpscr; return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); } unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) return 1; for (i = 0; i < (ELF_NFPREG - 1) ; i++) task->thread.TS_FPR(i) = buf[i]; - memcpy(&task->thread.fpscr, &buf[i], sizeof(double)); + task->thread.fp_state.fpscr = buf[i]; return 0; } @@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task, unsigned long copy_vsx_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < ELF_NVSRHALFREG; i++) - buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET]; + buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); } unsigned long copy_vsx_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) return 1; for (i = 0; i < ELF_NVSRHALFREG ; i++) - task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; + task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } @@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task, unsigned long copy_transact_fpr_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < (ELF_NFPREG - 1) ; i++) buf[i] = task->thread.TS_TRANS_FPR(i); - memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double)); + buf[i] = task->thread.transact_fp.fpscr; return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); } unsigned long copy_transact_fpr_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) return 1; for (i = 0; i < (ELF_NFPREG - 1) ; i++) task->thread.TS_TRANS_FPR(i) = buf[i]; - memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double)); + task->thread.transact_fp.fpscr = buf[i]; return 0; } @@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task, unsigned long copy_transact_vsx_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < ELF_NVSRHALFREG; i++) - buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET]; + buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET]; return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); } unsigned long copy_transact_vsx_from_user(struct task_struct *task, void __user *from) { - double 
buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) return 1; for (i = 0; i < ELF_NVSRHALFREG ; i++) - task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i]; + task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ @@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task, inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { - return __copy_to_user(to, task->thread.fpr, + return __copy_to_user(to, task->thread.fp_state.fpr, ELF_NFPREG * sizeof(double)); } inline unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { - return __copy_from_user(task->thread.fpr, from, + return __copy_from_user(task->thread.fp_state.fpr, from, ELF_NFPREG * sizeof(double)); } @@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task, inline unsigned long copy_transact_fpr_to_user(void __user *to, struct task_struct *task) { - return __copy_to_user(to, task->thread.transact_fpr, + return __copy_to_user(to, task->thread.transact_fp.fpr, ELF_NFPREG * sizeof(double)); } inline unsigned long copy_transact_fpr_from_user(struct task_struct *task, void __user *from) { - return __copy_from_user(task->thread.transact_fpr, from, + return __copy_from_user(task->thread.transact_fp.fpr, from, ELF_NFPREG * sizeof(double)); } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ @@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); - if (__copy_to_user(&frame->mc_vregs, current->thread.vr, + if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; /* set MSR_VEC in the saved MSR value to indicate that @@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs, /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); - if (__copy_to_user(&frame->mc_vregs, current->thread.vr, + if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; if (msr & MSR_VEC) { if (__copy_to_user(&tm_frame->mc_vregs, - current->thread.transact_vr, + ¤t->thread.transact_vr, ELF_NVRREG * sizeof(vector128))) return 1; } else { if (__copy_to_user(&tm_frame->mc_vregs, - current->thread.vr, + ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; } @@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ - if (__copy_from_user(current->thread.vr, &sr->mc_vregs, + if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) - memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); + memset(¤t->thread.vr_state, 0, + ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) @@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs, return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; #endif /* CONFIG_VSX */ /* * force the process to reload the FP registers from @@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if 
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f93ec2835a13..b3c615764c9b 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+		err |= __copy_to_user(v_regs, &current->thread.vr_state,
+				      33 * sizeof(vector128));
 		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
 		 * contains valid data.
 		 */
@@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	if (current->thread.used_vr) {
 		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-		err |= __copy_to_user(v_regs, current->thread.vr,
+		err |= __copy_to_user(v_regs, &current->thread.vr_state,
 				      33 * sizeof(vector128));
 		/* If VEC was enabled there are transactional VRs valid too,
 		 * else they're a copy of the checkpointed VRs.
 		 */
 		if (msr & MSR_VEC)
 			err |= __copy_to_user(tm_v_regs,
-					      current->thread.transact_vr,
+					      &current->thread.transact_vr,
 					      33 * sizeof(vector128));
 		else
 			err |= __copy_to_user(tm_v_regs,
-					      current->thread.vr,
+					      &current->thread.vr_state,
 					      33 * sizeof(vector128));
 
 		/* set MSR_VEC in the MSR value in the frame to indicate
@@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && (msr & MSR_VEC) != 0)
-		err |= __copy_from_user(current->thread.vr, v_regs,
+		err |= __copy_from_user(&current->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
 	else if (current->thread.used_vr)
-		memset(current->thread.vr, 0, 33 * sizeof(vector128));
+		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
 	/* Always get VRSAVE back */
 	if (v_regs != NULL)
 		err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		err |= copy_vsx_from_user(current, v_regs);
 	else
 		for (i = 0; i < 32 ; i++)
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
 	return err;
 }
@@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
-		err |= __copy_from_user(current->thread.vr, v_regs,
+		err |= __copy_from_user(&current->thread.vr_state, v_regs,
 					33 * sizeof(vector128));
-		err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
 					33 * sizeof(vector128));
 	}
 	else if (current->thread.used_vr) {
-		memset(current->thread.vr, 0, 33 * sizeof(vector128));
-		memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
 	}
 	/* Always get VRSAVE back */
 	if (v_regs != NULL && tm_v_regs != NULL) {
@@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		err |= copy_transact_vsx_from_user(current, tm_v_regs);
 	} else {
 		for (i = 0; i < 32 ; i++) {
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
 		}
 	}
#endif
@@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 		goto badframe;
 
 	/* Make sure signal handler doesn't get spurious FP exceptions */
-	current->thread.fpscr.val = 0;
+	current->thread.fp_state.fpscr = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
 	 * just indicates to userland that we were doing a transaction, but we
@@ -773,8 +774,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 	/* Set up "regs" so we "return" to the signal handler.  */
 	err |= get_user(regs->nip, &funct_desc_ptr->entry);
-	/* enter the signal handler in big-endian mode */
+	/* enter the signal handler in native-endian mode */
 	regs->msr &= ~MSR_LE;
+	regs->msr |= (MSR_KERNEL & MSR_LE);
 	regs->gpr[1] = newsp;
 	err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
 	regs->gpr[3] = signr;
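Both the setup and restore paths above copy 33 * sizeof(vector128) starting at &thread.vr_state, i.e. vr0..vr31 plus VSCR in a single block. That only works if VSCR sits immediately after the 32 vector registers. A hedged sketch of the layout being relied on (field names illustrative; the real definition lives in the powerpc processor headers):

typedef struct {
	unsigned int u[4];		/* one 128-bit vector register */
} vector128;

struct thread_vr_state {
	vector128 vr[32];		/* vr0..vr31 */
	vector128 vscr;			/* copied as the 33rd element */
};

/* The block copy silently depends on this: */
_Static_assert(sizeof(struct thread_vr_state) == 33 * sizeof(vector128),
	       "vscr must be contiguous with vr[] for the 33-element copy");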
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8e59abc237d7..930cd8af3503 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu)
 		smp_ops->cpu_die(cpu);
 }
 
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
 void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 22045984835f..988f38dced0f 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -114,7 +114,9 @@ _GLOBAL(swsusp_arch_suspend)
 	SAVE_SPECIAL(MSR)
 	SAVE_SPECIAL(XER)
#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FW_FTR_SECTION
 	SAVE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
#else
 	SAVE_SPR(TCR)
 
@@ -231,7 +233,9 @@ nothing_to_copy:
 	/* can't use RESTORE_SPECIAL(MSR) */
 	ld	r0, SL_MSR(r11)
 	mtmsrd	r0, 0
+BEGIN_FW_FTR_SECTION
 	RESTORE_SPECIAL(SDR1)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
#else
 	/* Restore SPRG1, be used to save paca */
 	ld	r0, SL_SPRG1(r11)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index cd809eaa8b5c..ef47bcbd4352 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -12,16 +12,15 @@
#include <asm/reg.h>
 
#ifdef CONFIG_VSX
-/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base)	\
+/* See fpu.S, this is borrowed from there */
+#define __SAVE_32FPRS_VSRS(n,c,base)		\
 BEGIN_FTR_SECTION				\
 	b	2f;				\
 END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
-	SAVE_32FPRS_TRANSACT(n,base);		\
+	SAVE_32FPRS(n,base);			\
 	b	3f;				\
-2:	SAVE_32VSRS_TRANSACT(n,c,base);		\
+2:	SAVE_32VSRS(n,c,base);			\
 3:
-/* ...and this is just plain borrowed from there. */
 #define __REST_32FPRS_VSRS(n,c,base)		\
 BEGIN_FTR_SECTION				\
 	b	2f;				\
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
 2:	REST_32VSRS(n,c,base);			\
 3:
#else
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base)	SAVE_32FPRS_TRANSACT(n, base)
-#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
+#define __SAVE_32FPRS_VSRS(n,c,base)	SAVE_32FPRS(n, base)
+#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
#endif
-#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
-	__SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define SAVE_32FPRS_VSRS(n,c,base) \
+	__SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 #define REST_32FPRS_VSRS(n,c,base) \
 	__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 
@@ -107,7 +106,7 @@ DSCR_DEFAULT:
 _GLOBAL(tm_reclaim)
 	mfcr	r6
 	mflr	r0
-	std	r6, 8(r1)
+	stw	r6, 8(r1)
 	std	r0, 16(r1)
 	std	r2, 40(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)
@@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim)
 	andis.	r0, r4, MSR_VEC@h
 	beq	dont_backup_vec
 
-	SAVE_32VRS_TRANSACT(0, r6, r3)	/* r6 scratch, r3 thread */
+	addi	r7, r3, THREAD_TRANSACT_VRSTATE
+	SAVE_32VRS(0, r6, r7)	/* r6 scratch, r7 transact vr state */
 	mfvscr	vr0
-	li	r6, THREAD_TRANSACT_VSCR
-	stvx	vr0, r3, r6
+	li	r6, VRSTATE_VSCR
+	stvx	vr0, r7, r6
 dont_backup_vec:
 	mfspr	r0, SPRN_VRSAVE
 	std	r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -168,10 +168,11 @@ dont_backup_vec:
 	andi.	r0, r4, MSR_FP
 	beq	dont_backup_fp
 
-	SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3)	/* r6 scratch, r3 thread */
+	addi	r7, r3, THREAD_TRANSACT_FPSTATE
+	SAVE_32FPRS_VSRS(0, R6, R7)	/* r6 scratch, r7 transact fp state */
 
 	mffs	fr0
-	stfd	fr0,THREAD_TRANSACT_FPSCR(r3)
+	stfd	fr0,FPSTATE_FPSCR(r7)
 dont_backup_fp:
 	/* The moment we treclaim, ALL of our GPRs will switch
@@ -284,7 +285,7 @@ dont_backup_fp:
 	REST_NVGPRS(r1)
 
 	addi	r1, r1, TM_FRAME_SIZE
-	ld	r4, 8(r1)
+	lwz	r4, 8(r1)
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
@@ -309,7 +310,7 @@ dont_backup_fp:
 _GLOBAL(tm_recheckpoint)
 	mfcr	r5
 	mflr	r0
-	std	r5, 8(r1)
+	stw	r5, 8(r1)
 	std	r0, 16(r1)
 	std	r2, 40(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)
@@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
 	andis.	r0, r4, MSR_VEC@h
 	beq	dont_restore_vec
 
-	li	r5, THREAD_VSCR
-	lvx	vr0, r3, r5
+	addi	r8, r3, THREAD_VRSTATE
+	li	r5, VRSTATE_VSCR
+	lvx	vr0, r8, r5
 	mtvscr	vr0
-	REST_32VRS(0, r5, r3)			/* r5 scratch, r3 THREAD ptr */
+	REST_32VRS(0, r5, r8)			/* r5 scratch, r8 ptr */
 dont_restore_vec:
 	ld	r5, THREAD_VRSAVE(r3)
 	mtspr	SPRN_VRSAVE, r5
@@ -370,9 +372,10 @@ dont_restore_vec:
 	andi.	r0, r4, MSR_FP
 	beq	dont_restore_fp
 
-	lfd	fr0, THREAD_FPSCR(r3)
+	addi	r8, r3, THREAD_FPSTATE
+	lfd	fr0, FPSTATE_FPSCR(r8)
 	MTFSF_L(fr0)
-	REST_32FPRS_VSRS(0, R4, R3)
+	REST_32FPRS_VSRS(0, R4, R8)
 dont_restore_fp:
 	mtmsr	r6			/* FP/Vec off again! */
 
@@ -441,7 +444,7 @@ restore_gprs:
 	REST_NVGPRS(r1)
 
 	addi	r1, r1, TM_FRAME_SIZE
-	ld	r4, 8(r1)
+	lwz	r4, 8(r1)
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
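The std-to-stw and ld-to-lwz changes for the CR save above are a size fix: mfcr produces a 32-bit value and the ABI's CR save slot at 8(r1) is one word, so an 8-byte store wrote past the slot. A hedged C overlay of the 64-bit ELF (v1) stack-frame header these offsets refer to, field names illustrative:

#include <stdint.h>

/* Illustrative overlay only: the code above stores CR at 8(r1),
 * LR at 16(r1) and the TOC at 40(r1).  cr_save is a 32-bit word,
 * hence stw/lwz rather than std/ld. */
struct ppc64_frame_header {
	uint64_t back_chain;		/*  0(r1) */
	uint32_t cr_save;		/*  8(r1): 32-bit CR image */
	uint32_t reserved;		/* 12(r1) */
	uint64_t lr_save;		/* 16(r1) */
	uint64_t compiler_dword;	/* 24(r1) */
	uint64_t linker_dword;		/* 32(r1) */
	uint64_t toc_save;		/* 40(r1): matches "std r2, 40(r1)" */
};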
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4f5df4e7df12..907a472f9a9e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
 
 	flush_fp_to_thread(current);
 
-	code = __parse_fpscr(current->thread.fpscr.val);
+	code = __parse_fpscr(current->thread.fp_state.fpscr);
 
 	_exception(SIGFPE, regs, code, regs->nip);
 }
@@ -1018,6 +1018,13 @@ static int emulate_instruction(struct pt_regs *regs)
 		return emulate_isel(regs, instword);
 	}
 
+	/* Emulate sync instruction variants */
+	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
+		PPC_WARN_EMULATED(sync, regs);
+		asm volatile("sync");
+		return 0;
+	}
+
#ifdef CONFIG_PPC64
 	/* Emulate the mfspr rD, DSCR. */
 	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
@@ -1069,7 +1076,7 @@ static int emulate_math(struct pt_regs *regs)
 		return 0;
 	case 1: {
 		int code = 0;
-		code = __parse_fpscr(current->thread.fpscr.val);
+		code = __parse_fpscr(current->thread.fp_state.fpscr);
 		_exception(SIGFPE, regs, code, regs->nip);
 		return 0;
 	}
@@ -1371,8 +1378,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
 
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
-extern void do_load_up_fpu(struct pt_regs *regs);
-
 void fp_unavailable_tm(struct pt_regs *regs)
 {
 	/* Note:  This does not handle any kind of FP laziness. */
@@ -1403,8 +1408,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
 }
 
#ifdef CONFIG_ALTIVEC
-extern void do_load_up_altivec(struct pt_regs *regs);
-
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
 	/* See the comments in fp_unavailable_tm().  This function operates
@@ -1465,7 +1468,8 @@ void SoftwareEmulation(struct pt_regs *regs)
 
 	if (!user_mode(regs)) {
 		debugger(regs);
-		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
+		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
+		    regs, SIGFPE);
 	}
 
 	if (!emulate_math(regs))
@@ -1635,7 +1639,7 @@ void altivec_assist_exception(struct pt_regs *regs)
 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
 				   "in %s at %lx\n", current->comm, regs->nip);
-		current->thread.vscr.u[3] |= 0x10000;
+		current->thread.vr_state.vscr.u[3] |= 0x10000;
 	}
 }
#endif /* CONFIG_ALTIVEC */
@@ -1816,6 +1820,7 @@ struct ppc_emulated ppc_emulated = {
 	WARN_EMULATED_SETUP(popcntb),
 	WARN_EMULATED_SETUP(spe),
 	WARN_EMULATED_SETUP(string),
+	WARN_EMULATED_SETUP(sync),
 	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
 	WARN_EMULATED_SETUP(math),
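The new sync emulation above is a match-under-mask test: the mask keeps the opcode fields and discards the variant bits, so plain sync, lwsync and the other variants all reach the same emulation path. A standalone sketch of the test; the constants follow the PowerPC sync encoding but are illustrative here, not necessarily the kernel's PPC_INST_* values:

#include <stdint.h>
#include <stdio.h>

#define INST_SYNC	0x7c0004acU	/* base encoding of "sync" */
#define INST_SYNC_MASK	0xfc0007feU	/* keep opcode bits, drop variant bits */

static int is_sync_variant(uint32_t instword)
{
	return (instword & INST_SYNC_MASK) == INST_SYNC;
}

int main(void)
{
	printf("%d\n", is_sync_variant(0x7c0004acU));	/* sync     -> 1 */
	printf("%d\n", is_sync_variant(0x7c2004acU));	/* lwsync   -> 1 */
	printf("%d\n", is_sync_variant(0x7c0004aeU));	/* not sync -> 0 */
	return 0;
}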
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1d9c92621b36..094e45c16a17 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -34,8 +34,7 @@
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-
-#include "setup.h"
+#include <asm/setup.h>
 
#undef DEBUG
 
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index f223409629b9..e58ee10fa5c0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -4,7 +4,11 @@
 */
#include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
+#else
 OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common)
 ENTRY(_start)
 
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e4863819663b..64fb183a47c2 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -4,7 +4,11 @@
 */
#include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
+#else
 OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common64)
 ENTRY(_start)
 
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..c4bfadb2606b 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
 	vb = (instr >> 11) & 0x1f;
 	vc = (instr >> 6) & 0x1f;
 
-	vrs = current->thread.vr;
+	vrs = current->thread.vr_state.vr;
 	switch (instr & 0x3f) {
 	case 10:
 		switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
 		case 14:	/* vctuxs */
 			for (i = 0; i < 4; ++i)
 				vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
-						&current->thread.vscr.u[3]);
+					&current->thread.vr_state.vscr.u[3]);
 			break;
 		case 15:	/* vctsxs */
 			for (i = 0; i < 4; ++i)
 				vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
-						&current->thread.vscr.u[3]);
+					&current->thread.vr_state.vscr.u[3]);
 			break;
 		default:
 			return -EINVAL;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9e20999aaef2..0458a9aaba9d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,29 +8,6 @@
#include <asm/ptrace.h>
 
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
-	mflr	r0
-	std	r0, 16(r1)
-	stdu	r1, -112(r1)
-
-	subi	r6, r3, STACK_FRAME_OVERHEAD
-	/* load_up_altivec expects r12=MSR, r13=PACA, and returns
-	 * with r12 = new MSR.
-	 */
-	ld	r12,_MSR(r6)
-	GET_PACA(r13)
-	bl	load_up_altivec
-	std	r12,_MSR(r6)
-
-	ld	r0, 112+16(r1)
-	addi	r1, r1, 112
-	mtlr	r0
-	blr
-
 /* void do_load_up_transact_altivec(struct thread_struct *thread)
  *
  * This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
 	li	r4,1
 	stw	r4,THREAD_USED_VR(r3)
 
-	li	r10,THREAD_TRANSACT_VSCR
+	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
 	lvx	vr0,r10,r3
 	mtvscr	vr0
-	REST_32VRS_TRANSACT(0,r4,r3)
+	addi	r10,r3,THREAD_TRANSACT_VRSTATE
+	REST_32VRS(0,r4,r10)
 
 	/* Disable VEC again. */
 	MTMSRD(r6)
@@ -59,12 +37,36 @@ _GLOBAL(do_load_up_transact_altivec)
#endif
 
 /*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+	li	r4,VRSTATE_VSCR
+	lvx	vr0,r4,r3
+	mtvscr	vr0
+	REST_32VRS(0,r4,r3)
+	blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4, VRSTATE_VSCR
+	stvx	vr0, r4, r3
+	blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
  * On SMP we know the VMX is free, since we give it up every
  * switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_altivec)
 	mfmsr	r5			/* grab the current MSR */
@@ -90,10 +92,11 @@ _GLOBAL(load_up_altivec)
 	/* Save VMX state to last_task_used_altivec's THREAD struct */
 	toreal(r4)
 	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
+	addi	r6,r4,THREAD_VRSTATE
+	SAVE_32VRS(0,r5,r6)
 	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
+	li	r10,VRSTATE_VSCR
+	stvx	vr0,r10,r6
 	/* Disable VMX for last_task_used_altivec */
 	PPC_LL	r5,PT_REGS(r4)
 	toreal(r5)
@@ -125,12 +128,13 @@ _GLOBAL(load_up_altivec)
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
#endif
+	addi	r6,r5,THREAD_VRSTATE
 	li	r4,1
-	li	r10,THREAD_VSCR
+	li	r10,VRSTATE_VSCR
 	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
+	lvx	vr0,r10,r6
 	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
+	REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
 	/* Update last_task_used_altivec to 'current' */
 	subi	r4,r5,THREAD	/* Back to 'current' */
@@ -165,12 +169,16 @@ _GLOBAL(giveup_altivec)
 	PPC_LCMPI	0,r3,0
 	beqlr				/* if no previous owner, done */
 	addi	r3,r3,THREAD		/* want THREAD of task */
+	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
-	SAVE_32VRS(0,r4,r3)
+	PPC_LCMPI	0,r7,0
+	bne	2f
+	addi	r7,r3,THREAD_VRSTATE
+2:	PPC_LCMPI	0,r5,0
+	SAVE_32VRS(0,r4,r7)
 	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
+	li	r4,VRSTATE_VSCR
+	stvx	vr0,r4,r7
 	beq	1f
 	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
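The new load_vr_state/store_vr_state entry points give C code a direct way to move a full VMX image (vr0..vr31 plus VSCR) between registers and a state block; the asm only assumes r3 points at the block and that VMX is enabled. Hedged C-side prototypes for what the two entry points provide (the bodies are the assembly above):

struct thread_vr_state;		/* vr[32] + vscr, as sketched earlier */

/* Illustrative declarations; callers must have MSR[VEC] set. */
void load_vr_state(struct thread_vr_state *v);	/* memory -> VR0..31, VSCR */
void store_vr_state(struct thread_vr_state *v);	/* VR0..31, VSCR -> memory */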
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d38cc08b16c7..e7d0c88f621a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -997,21 +997,36 @@ static struct device_attribute vio_cmo_dev_attrs[] = {
 
 /* sysfs bus functions and data structures for CMO */
 
 #define viobus_cmo_rd_attr(name) \
-static ssize_t \
-viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
+static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \
 { \
 	return sprintf(buf, "%lu\n", vio_cmo.name); \
-}
+} \
+static BUS_ATTR_RO(cmo_##name)
 
 #define viobus_cmo_pool_rd_attr(name, var) \
 static ssize_t \
-viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
+cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
 { \
 	return sprintf(buf, "%lu\n", vio_cmo.name.var); \
+} \
+static BUS_ATTR_RO(cmo_##name##_##var)
+
+viobus_cmo_rd_attr(entitled);
+viobus_cmo_rd_attr(spare);
+viobus_cmo_rd_attr(min);
+viobus_cmo_rd_attr(desired);
+viobus_cmo_rd_attr(curr);
+viobus_cmo_pool_rd_attr(reserve, size);
+viobus_cmo_pool_rd_attr(excess, size);
+viobus_cmo_pool_rd_attr(excess, free);
+
+static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
+{
+	return sprintf(buf, "%lu\n", vio_cmo.high);
 }
 
-static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
-				     size_t count)
+static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
+			      size_t count)
 {
 	unsigned long flags;
 
@@ -1021,35 +1036,26 @@ static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
 
 	return count;
 }
-
-viobus_cmo_rd_attr(entitled);
-viobus_cmo_pool_rd_attr(reserve, size);
-viobus_cmo_pool_rd_attr(excess, size);
-viobus_cmo_pool_rd_attr(excess, free);
-viobus_cmo_rd_attr(spare);
-viobus_cmo_rd_attr(min);
-viobus_cmo_rd_attr(desired);
-viobus_cmo_rd_attr(curr);
-viobus_cmo_rd_attr(high);
-
-static struct bus_attribute vio_cmo_bus_attrs[] = {
-	__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
-	__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
-	__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
-	__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
-	__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
-	__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
-	__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
-	__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
-	__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
-	       viobus_cmo_high_show, viobus_cmo_high_reset),
-	__ATTR_NULL
+static BUS_ATTR_RW(cmo_high);
+
+static struct attribute *vio_bus_attrs[] = {
+	&bus_attr_cmo_entitled.attr,
+	&bus_attr_cmo_spare.attr,
+	&bus_attr_cmo_min.attr,
+	&bus_attr_cmo_desired.attr,
+	&bus_attr_cmo_curr.attr,
+	&bus_attr_cmo_high.attr,
+	&bus_attr_cmo_reserve_size.attr,
+	&bus_attr_cmo_excess_size.attr,
+	&bus_attr_cmo_excess_free.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(vio_bus);
 
 static void vio_cmo_sysfs_init(void)
 {
 	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
-	vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
+	vio_bus_type.bus_groups = vio_bus_groups;
 }
 #else /* CONFIG_PPC_SMLPAR */
 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
@@ -1413,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 		/* needed to ensure proper operation of coherent allocations
 		 * later, in case driver doesn't set it explicitly */
-		dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
-		dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+		dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
 	}
 
 	/* register with generic device framework */
@@ -1531,12 +1536,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 
 	dn = dev->of_node;
 	if (!dn) {
-		strcat(buf, "\n");
+		strcpy(buf, "\n");
 		return strlen(buf);
 	}
 	cp = of_get_property(dn, "compatible", NULL);
 	if (!cp) {
-		strcat(buf, "\n");
+		strcpy(buf, "\n");
 		return strlen(buf);
 	}
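The vio.c rewrite renames each show routine to the cmo_<name>_show pattern precisely so that BUS_ATTR_RO()/BUS_ATTR_RW() can token-paste a complete attribute definition from a single name. A simplified, standalone re-creation of that pattern; the types and macro body are trimmed stand-ins, not the kernel's real definitions:

#include <stdio.h>

struct bus_type;			/* opaque in this sketch */

/* Simplified stand-ins for the kernel's sysfs attribute types. */
struct attribute { const char *name; unsigned short mode; };
struct bus_attribute {
	struct attribute attr;
	long (*show)(struct bus_type *bt, char *buf);
};

/* Token-pasting contract: FOO_ATTR_RO(x) expects an x_show() function
 * and emits a bus_attr_x object, which is why the function names had
 * to change before the conversion could happen. */
#define FOO_ATTR_RO(_name) \
	struct bus_attribute bus_attr_##_name = { \
		.attr = { .name = #_name, .mode = 0444 }, \
		.show = _name##_show, \
	}

static long cmo_min_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", 42UL);	/* placeholder value */
}

static FOO_ATTR_RO(cmo_min);	/* defines bus_attr_cmo_min */

int main(void)
{
	char buf[16];

	bus_attr_cmo_min.show(0, buf);	/* bus pointer unused here */
	printf("%s: %s", bus_attr_cmo_min.attr.name, buf);
	return 0;
}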