Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/kernel/process.c    4
-rw-r--r--   arch/powerpc/kernel/traps.c     22
2 files changed, 19 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2980a22c487..ca8c33b41989 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -874,6 +874,8 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	giveup_all(container_of(thr, struct task_struct, thread));
+
 	/*
 	 * If we are in a transaction and FP is off then we can't have
 	 * used FP inside that transaction. Hence the checkpointed
@@ -893,8 +895,6 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 		memcpy(&thr->ckvr_state, &thr->vr_state,
 		       sizeof(struct thread_vr_state));
 
-	giveup_all(container_of(thr, struct task_struct, thread));
-
 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
 }
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 863c6858ed0f..e099e61c702b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1663,6 +1663,12 @@ out:
 
 void fp_unavailable_tm(struct pt_regs *regs)
 {
+	/*
+	 * Save the MSR now because tm_reclaim_current() is likely to
+	 * change it
+	 */
+	unsigned long orig_msr = regs->msr;
+
 	/* Note:  This does not handle any kind of FP laziness. */
 
 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
@@ -1687,10 +1693,10 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	 * If VMX is in use, the VRs now hold checkpointed values,
 	 * so we don't want to load the VRs from the thread_struct.
 	 */
-	tm_recheckpoint(&current->thread, MSR_FP);
+	tm_recheckpoint(&current->thread, orig_msr | MSR_FP);
 
 	/* If VMX is in use, get the transactional values back */
-	if (regs->msr & MSR_VEC) {
+	if (orig_msr & MSR_VEC) {
 		msr_check_and_set(MSR_VEC);
 		load_vr_state(&current->thread.vr_state);
 		/* At this point all the VSX state is loaded, so enable it */
@@ -1700,6 +1706,12 @@ void fp_unavailable_tm(struct pt_regs *regs)
 
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
+	/*
+	 * Save the MSR now because tm_reclaim_current() is likely to
+	 * change it
+	 */
+	unsigned long orig_msr = regs->msr;
+
 	/* See the comments in fp_unavailable_tm().  This function operates
 	 * the same way.
 	 */
@@ -1709,10 +1721,10 @@ void altivec_unavailable_tm(struct pt_regs *regs)
 		 regs->nip, regs->msr);
 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 	current->thread.load_vec = 1;
-	tm_recheckpoint(&current->thread, MSR_VEC);
+	tm_recheckpoint(&current->thread, orig_msr | MSR_VEC);
 	current->thread.used_vr = 1;
 
-	if (regs->msr & MSR_FP) {
+	if (orig_msr & MSR_FP) {
 		msr_check_and_set(MSR_FP);
 		load_fp_state(&current->thread.fp_state);
 		regs->msr |= MSR_VSX;
@@ -1751,7 +1763,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 	/* This loads & recheckpoints FP and VRs; but we have
 	 * to be sure not to overwrite previously-valid state.
 	 */
-	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+	tm_recheckpoint(&current->thread, orig_msr | MSR_FP | MSR_VEC);
 
 	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));
 
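The recurring change in the traps.c hunks is an ordering fix: regs->msr is captured into orig_msr before tm_reclaim_current(), because reclaim can clear the FP/VEC bits in regs->msr, and the saved value is then OR'd with the facility being enabled when recheckpointing. Below is a minimal user-space sketch of that ordering; reclaim(), recheckpoint(), the pt_regs struct and the MSR bit values here are simplified stand-ins for illustration, not the real kernel interfaces.

#include <stdio.h>

/* Simplified stand-in MSR facility bits (illustrative only). */
#define MSR_FP  (1UL << 13)
#define MSR_VEC (1UL << 25)

struct pt_regs { unsigned long msr; };

/* Stand-in for tm_reclaim_current(): may clear facility bits in regs->msr. */
static void reclaim(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_FP | MSR_VEC);
}

/* Stand-in for tm_recheckpoint(): report which facilities it was given. */
static void recheckpoint(unsigned long msr)
{
	printf("recheckpoint with FP=%d VEC=%d\n",
	       !!(msr & MSR_FP), !!(msr & MSR_VEC));
}

int main(void)
{
	struct pt_regs regs = { .msr = MSR_VEC };  /* transaction was using VMX */

	/* Save the MSR before reclaim, mirroring the patch. */
	unsigned long orig_msr = regs.msr;

	reclaim(&regs);

	/* Reading regs.msr here would have lost MSR_VEC; the saved copy keeps it. */
	recheckpoint(orig_msr | MSR_FP);           /* prints FP=1 VEC=1 */
	return 0;
}

With the pre-patch order, recheckpointing read regs->msr after reclaim had already modified it, so a facility that was in use when the trap was taken could be dropped from the checkpointed state; saving orig_msr up front avoids that.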