author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-03-02 11:33:52 +1100
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-03-09 10:55:20 +1100
commit    | d9ada91ae2969ae6b6dc3574fd08a6ebda5df766 (patch)
tree      | a4509657892acb0b853760d6d26729a913a5fc2b
parent    | 9424fabf8617c15e18a5ffd29bc3bcfa36620473 (diff)
powerpc: Replace mfmsr instructions with load from PACA kernel_msr field
On 64-bit, the mfmsr instruction can be quite slow, slower
than loading a field from the cache-hot PACA, which happens
to already contain the value we want in most cases.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
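
The pattern is easy to demonstrate outside the kernel: keep the MSR value the kernel runs with (EE already clear) cached in a per-CPU structure, and derive both the "interrupts off" and "interrupts on" values from that copy instead of re-reading the MSR each time. Below is a minimal user-space sketch of that idea; fake_paca, fake_mfmsr and fake_mtmsrd are made-up stand-ins, not kernel APIs, and only the MSR_EE bit position matches the kernel's definition.

```c
/*
 * Minimal user-space sketch of the pattern in this commit, not kernel code:
 * cache the kernel MSR value (EE clear) in a per-CPU structure and use it
 * instead of re-reading the MSR with a slow instruction on every
 * hard-enable/disable. fake_paca, fake_mfmsr and fake_mtmsrd are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_EE (UINT64_C(1) << 15)	/* external interrupt enable */

struct fake_paca {
	uint64_t kernel_msr;		/* MSR the kernel runs with, EE clear */
};

static uint64_t current_msr = UINT64_C(0x9000000000001032);	/* pretend MSR */

/* Stand-in for the (slow) mfmsr instruction. */
static uint64_t fake_mfmsr(void)
{
	return current_msr;
}

/* Stand-in for mtmsrd. */
static void fake_mtmsrd(uint64_t val)
{
	current_msr = val;
}

/* Old style: read the MSR every time, then clear EE. */
static void hard_irq_disable_old(void)
{
	fake_mtmsrd(fake_mfmsr() & ~MSR_EE);
}

/* New style: a single load from the cached per-CPU copy, no MSR read. */
static void hard_irq_disable_new(const struct fake_paca *paca)
{
	fake_mtmsrd(paca->kernel_msr);	/* kernel_msr already has EE clear */
}

static void hard_irq_enable_new(const struct fake_paca *paca)
{
	fake_mtmsrd(paca->kernel_msr | MSR_EE);
}

int main(void)
{
	struct fake_paca paca = { .kernel_msr = current_msr & ~MSR_EE };

	hard_irq_disable_old();
	hard_irq_enable_new(&paca);
	hard_irq_disable_new(&paca);
	printf("final MSR: %#llx (EE %s)\n",
	       (unsigned long long)current_msr,
	       (current_msr & MSR_EE) ? "set" : "clear");
	return 0;
}
```

In the diff that follows, the new __hard_irq_disable() corresponds to hard_irq_disable_new() above: one load of local_paca->kernel_msr replaces the mfmsr-and-mask sequence.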
-rw-r--r-- | arch/powerpc/include/asm/exception-64s.h |  2
-rw-r--r-- | arch/powerpc/include/asm/hw_irq.h         |  4
-rw-r--r-- | arch/powerpc/kernel/entry_64.S            | 14
-rw-r--r-- | arch/powerpc/kernel/exceptions-64s.S      |  5
4 files changed, 10 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 7f4718c4f04a..70354af0740e 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -298,7 +298,7 @@ label##_hv:					\
 
 /* Exception addition: Keep interrupt state */
 #define ENABLE_INTS				\
-	mfmsr	r11;				\
+	ld	r11,PACAKMSR(r13);		\
 	ld	r12,_MSR(r1);			\
 	rlwimi	r11,r12,0,MSR_EE;		\
 	mtmsrd	r11,1
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 531ba00fcbab..6c6fa955baa7 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,8 +68,8 @@ static inline bool arch_irqs_disabled(void)
 #define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory");
 #define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory");
 #else
-#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
-#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
+#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
 #endif
 
 #define hard_irq_disable()	\
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index cc030b73174b..c513beb78b3b 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -557,10 +557,8 @@ _GLOBAL(ret_from_except_lite)
 #ifdef CONFIG_PPC_BOOK3E
 	wrteei	0
 #else
-	mfmsr	r10		/* Get current interrupt state */
-	rldicl	r9,r10,48,1	/* clear MSR_EE */
-	rotldi	r9,r9,16
-	mtmsrd	r9,1		/* Update machine state */
+	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PREEMPT
@@ -625,8 +623,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 	 * userspace and we take an exception after restoring r13,
 	 * we end up corrupting the userspace r13 value.
	 */
-	mfmsr	r4
-	andc	r4,r4,r0	/* r0 contains MSR_RI here */
+	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
+	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
 	mtmsrd	r4,1
 
 	/*
@@ -686,9 +684,7 @@ do_work:
 #ifdef CONFIG_PPC_BOOK3E
 	wrteei	0
 #else
-	mfmsr	r10
-	rldicl	r10,r10,48,1
-	rotldi	r10,r10,16
+	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 	li	r0,0
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0fb42ae21694..02448ea58ad3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,9 +848,8 @@ fast_exception_return:
 	REST_GPR(0, r1)
 	REST_8GPRS(2, r1)
 
-	mfmsr	r10
-	rldicl	r10,r10,48,1	/* clear EE */
-	rldicr	r10,r10,16,61	/* clear RI (LE is 0 already) */
+	ld	r10,PACAKMSR(r13)
+	clrrdi	r10,r10,2	/* clear RI */
 	mtmsrd	r10,1
 
 	mtspr	SPRN_SRR1,r12
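
A note on the exceptions-64s.S hunk above: PACAKMSR is loaded with EE already clear, so the old three-instruction rotate-and-mask sequence collapses to a single clrrdi r10,r10,2, which clears the two low-order MSR bits, MSR_LE (bit 0, already zero here) and MSR_RI (bit 1). The small C check below illustrates that equivalence; the MSR bit positions match the kernel's definitions, but the sample kernel_msr value is made up.

```c
/*
 * Illustrative check of the clrrdi trick used above: clrrdi rA,rS,2 clears
 * the two least-significant bits of a 64-bit register, i.e. MSR_LE (bit 0)
 * and MSR_RI (bit 1). The sample kernel_msr value is invented; the bit
 * positions match the kernel's MSR definitions.
 */
#include <assert.h>
#include <stdint.h>

#define MSR_LE (UINT64_C(1) << 0)	/* little-endian mode */
#define MSR_RI (UINT64_C(1) << 1)	/* recoverable interrupt */

/* C equivalent of "clrrdi reg,reg,2": clear the low two bits. */
static uint64_t clrrdi2(uint64_t reg)
{
	return reg & ~(uint64_t)3;
}

int main(void)
{
	uint64_t kernel_msr = UINT64_C(0x9000000000001032);	/* sample, EE clear */
	uint64_t r10 = clrrdi2(kernel_msr);

	assert((r10 & (MSR_RI | MSR_LE)) == 0);	/* RI and LE are cleared */
	/* Nothing but the low two bits changed. */
	assert((r10 | MSR_RI | MSR_LE) == (kernel_msr | MSR_RI | MSR_LE));
	return 0;
}
```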