author     Scott Wood <scottwood@freescale.com>  2014-03-07 14:48:35 -0600
committer  Scott Wood <scottwood@freescale.com>  2014-03-19 19:57:13 -0500
commit     82d86de25b9c99db546e17c6f7ebf9a691da557e (patch)
tree       3d977f76fa266367c7faa95567bc72e199a73db5 /arch/powerpc
parent     c4787d1ecfefce86971c1360ed5cef36af6182db (diff)
powerpc/e6500: Make TLB lock recursive
Once special level interrupts are supported, we may take nested TLB misses -- so allow the same thread to acquire the lock recursively.

The lock will not be effective against the nested TLB miss handler trying to write the same entry as the interrupted TLB miss handler, but that's also a problem on non-threaded CPUs that lack TLB write conditional. This will be addressed in the patch that enables crit/mc support, by invalidating the TLB on return from level exceptions.

Signed-off-by: Scott Wood <scottwood@freescale.com>
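For readers of the patch, a minimal C sketch of the locking scheme follows. Instead of storing a constant 1, the lock byte now records which thread owns it, so a nested miss handler on the same thread can recognize its own lock and skip both the acquire spin and the final unlock. The names (tcd_lock, tcd_unlock, struct tlb_core_data_sketch) and the cpu+1 owner token (so 0 can keep meaning "free") are illustrative assumptions, not what the assembly below literally does -- the real code stores the raw CPU number loaded from PACAPACAINDEX and tracks recursion in cr1.eq.

    /*
     * Illustrative sketch only, not the kernel implementation; the real
     * lock is one byte in struct tlb_core_data, taken with lbarx/stbcx.
     */
    #include <stdatomic.h>
    #include <stdbool.h>

    struct tlb_core_data_sketch {
    	_Atomic unsigned char lock;	/* 0 = free, else owner token */
    	unsigned char esel_next, esel_max, esel_first;
    };

    /* Returns true if the lock was already ours (recursive entry). */
    static bool tcd_lock(struct tlb_core_data_sketch *tcd, unsigned int cpu)
    {
    	unsigned char me = (unsigned char)cpu + 1;	/* keep 0 = "free" */
    	unsigned char old;

    	for (;;) {
    		old = 0;
    		if (atomic_compare_exchange_weak(&tcd->lock, &old, me))
    			return false;	/* freshly acquired */
    		if (old == me)
    			return true;	/* nested miss on the same thread */
    		/* another thread holds it: spin until it releases */
    	}
    }

    static void tcd_unlock(struct tlb_core_data_sketch *tcd, bool recursive)
    {
    	if (!recursive)			/* the asm tests cr1.eq for this */
    		atomic_store(&tcd->lock, 0);
    }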
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h   9
-rw-r--r--  arch/powerpc/kernel/setup_64.c          2
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S          19
3 files changed, 20 insertions, 10 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 89b785d16846..901dac6b6cb7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -287,11 +287,14 @@ extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;
struct tlb_core_data {
+ /*
+ * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+ * Must be the first struct element.
+ */
+ u8 lock;
+
/* For software way selection, as on Freescale TLB1 */
u8 esel_next, esel_max, esel_first;
-
- /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
- u8 lock;
};
#ifdef CONFIG_PPC64
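Why the move to the top of the struct matters: the miss handlers keep a pointer to this core's tlb_core_data in r11, and with the lock as the first element that pointer is also the lock's address. That is what lets the tlb_low_64e.S hunk below drop the "addi r10,r11,TCD_LOCK" and reuse r10 for the CPU number. A standalone sketch of the invariant (hypothetical _sketch type; the kernel enforces the real one with the BUILD_BUG_ON added in setup_64.c next):

    #include <assert.h>
    #include <stddef.h>

    struct tlb_core_data_sketch {
    	unsigned char lock;	/* must stay first: base address == lock address */
    	unsigned char esel_next, esel_max, esel_first;
    };

    /* Compile-time guard mirroring the BUILD_BUG_ON in setup_64.c. */
    static_assert(offsetof(struct tlb_core_data_sketch, lock) == 0,
    	      "lock must be the first element of tlb_core_data");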
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index da9c42f53bb1..4933909cc5c0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,6 +102,8 @@ static void setup_tlb_core_data(void)
{
int cpu;
+ BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
+
for_each_possible_cpu(cpu) {
int first = cpu_first_thread_sibling(cpu);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 6bf50507a4b5..1e50249e900b 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -284,7 +284,7 @@ itlb_miss_fault_bolted:
* r14 = page table base
* r13 = PACA
* r11 = tlb_per_core ptr
- * r10 = crap (free to use)
+ * r10 = cpu number
*/
tlb_miss_common_e6500:
/*
@@ -293,15 +293,18 @@ tlb_miss_common_e6500:
*
* MAS6:IND should be already set based on MAS4
*/
- addi r10,r11,TCD_LOCK
-1: lbarx r15,0,r10
+1: lbarx r15,0,r11
+ lhz r10,PACAPACAINDEX(r13)
cmpdi r15,0
+ cmpdi cr1,r15,1 /* set cr1.eq = 0 for non-recursive */
bne 2f
- li r15,1
- stbcx. r15,0,r10
+ stbcx. r10,0,r11
bne 1b
+3:
.subsection 1
-2: lbz r15,0(r10)
+2: cmpd cr1,r15,r10 /* recursive lock due to mcheck/crit/etc? */
+ beq cr1,3b /* unlock will happen if cr1.eq = 0 */
+ lbz r15,0(r11)
cmpdi r15,0
bne 2b
b 1b
@@ -379,9 +382,11 @@ tlb_miss_common_e6500:
tlb_miss_done_e6500:
.macro tlb_unlock_e6500
+ beq cr1,1f /* no unlock if lock was recursively grabbed */
li r15,0
isync
- stb r15,TCD_LOCK(r11)
+ stb r15,0(r11)
+1:
.endm
tlb_unlock_e6500
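Putting the two hunks together, the control flow above corresponds roughly to the following pseudo-C, reusing the tcd_lock/tcd_unlock helpers from the sketch near the top of this page; tlb_write_entry_sketch is purely a placeholder for the page-table walk and TLB write that happen between lock and unlock:

    /* Rough C rendering of tlb_miss_common_e6500 + tlb_unlock_e6500;
     * illustrative only, building on the earlier tcd_lock/tcd_unlock sketch. */
    static void tlb_write_entry_sketch(struct tlb_core_data_sketch *tcd)
    {
    	/* placeholder: walk the page table and write the TLB entry */
    }

    static void tlb_miss_sketch(struct tlb_core_data_sketch *tcd, unsigned int cpu)
    {
    	/* the lbarx/stbcx. loop; cr1.eq records a recursive acquisition */
    	bool recursive = tcd_lock(tcd, cpu);

    	tlb_write_entry_sketch(tcd);

    	/* tlb_unlock_e6500: "beq cr1,1f" skips the store when recursive */
    	tcd_unlock(tcd, recursive);
    }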