author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2007-10-30 09:46:06 +1100
committer: Josh Boyer <jwboyer@linux.vnet.ibm.com>  2007-11-01 07:15:09 -0500
commit:    e701d269aa28996f3502780951fe1b12d5d66b49
tree:      a55db7df5755bf9c69f466432786de7e7e445ba8 /arch
parent:    57d75561be5496289601b2c94787ec38c718fcae
[POWERPC] 4xx: Fix 4xx flush_tlb_page()
On 4xx CPUs, the current implementation of flush_tlb_page() uses a low level
_tlbie() assembly function that only works for the current PID. Thus,
invalidations caused by, for example, a COW fault triggered by
get_user_pages() from a different context will not work properly, causing,
among other things, gdb breakpoints to fail.

This patch adds a "pid" argument to _tlbie() on 4xx processors, and uses it
to flush entries in the right context. FSL BookE also gets the argument but
it seems they don't need it (their tlbivax form ignores the PID when
invalidating, according to the document I have).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
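For orientation before reading the diff: the visible change to the C
interface is the extra "pid" parameter on _tlbie(). The declaration lives in
a header outside this arch/-only diffstat, so the wrapper below is a hedged
sketch of how flush_tlb_page() plausibly forwards the context, not a hunk
from this patch:

    /* before: the search implicitly used the current SPRN_PID */
    extern void _tlbie(unsigned long address);

    /* after: the caller names the MMU context (PID) to search */
    extern void _tlbie(unsigned long address, unsigned int pid);

    /* sketch, assuming the 32-bit context layout of the time:
     * forward the mm's context id, or 0 when there is no mm */
    static inline void flush_tlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
    {
            _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
    }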
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/kernel/misc_32.S     23
-rw-r--r--   arch/powerpc/mm/fault.c            2
-rw-r--r--   arch/powerpc/mm/mmu_decl.h         4
-rw-r--r--   arch/ppc/kernel/misc.S            22
-rw-r--r--   arch/ppc/mm/fault.c                2
-rw-r--r--   arch/ppc/mm/mmu_decl.h             4
-rw-r--r--   arch/ppc/platforms/4xx/ebony.c     2
-rw-r--r--   arch/ppc/platforms/4xx/ocotea.c    2
-rw-r--r--   arch/ppc/platforms/4xx/taishan.c   2
9 files changed, 40 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347d..0ed2c7eddc9e 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
tlbwe r3, r3, TLB_TAG
isync
10:
+
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
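Both the 40x and 44x variants above implement the same idea: run the
hardware TLB search (tlbsx.) under the caller-supplied PID, with interrupts
masked so nothing disturbs the borrowed state in between. Paraphrased as
C-like pseudocode (wrteei()/wrtee(), tlbsx_match() and tlbwe_invalidate()
are invented names standing in for the instructions, not kernel APIs):

    old_msr = mfmsr();
    old_pid = mfspr(SPRN_PID);
    wrteei(0);                        /* 40x: external interrupts off */
    mtspr(SPRN_PID, pid);             /* search the target context    */
    entry = tlbsx_match(address);     /* tlbsx. r3,0,r3               */
    mtspr(SPRN_PID, old_pid);         /* restore the live PID         */
    wrtee(old_msr);                   /* restore MSR[EE]              */
    if (entry >= 0)                   /* match found?                 */
            tlbwe_invalidate(entry);  /* clear V bit by TLB index     */

On 44x the PID goes into the TID field of MMUCR instead (the rlwimi), and
the masking must also cover critical, machine-check and debug interrupts,
since a TLB miss taken in between would clobber MMUCR. The r4/r5 reshuffle
in the 44x hunk falls out of the new calling convention: the pid argument
now arrives in r4.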
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a18fda361cc0..8135da06e0a4 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -309,7 +309,7 @@ good_area:
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
+ _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
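This call site is the scenario from the commit message: the PTE just updated
belongs to mm, which need not be current->mm when the access came in through
get_user_pages() (for instance a debugger writing a breakpoint via ptrace).
A sketch with hypothetical tracer/tracee task pointers, using the
get_user_pages() signature of this era:

    /* tracer (gdb) forces a COW break in the tracee's address space */
    struct mm_struct *mm = tracee->mm;            /* not current->mm  */
    get_user_pages(tracer, mm, addr, 1, 1, 1, &page, NULL);
    /* the stale TLB entry is tagged with the tracee's PID, so the
     * invalidation must name that context explicitly: */
    _tlbie(addr, mm->context.id);

With the old one-argument _tlbie(), the search ran under the tracer's PID
and missed the entry, which is the gdb breakpoint failure described above.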
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c94a64fd3c01..eb3a732e91db 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index a22e1f4d94c8..2b81e71d6b2d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
isync
10:
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 254c23b755e6..36c0e7529edb 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -227,7 +227,7 @@ good_area:
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
+ _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
diff --git a/arch/ppc/mm/mmu_decl.h b/arch/ppc/mm/mmu_decl.h
index 540f3292b229..f1d4f2109a99 100644
--- a/arch/ppc/mm/mmu_decl.h
+++ b/arch/ppc/mm/mmu_decl.h
@@ -54,12 +54,12 @@ extern unsigned int num_tlbcam_entries;
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 05d7184d7e14..453643a0eeea 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -236,7 +236,7 @@ ebony_early_serial_map(void)
gen550_init(0, &port);
/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif
port.membase = ioremap64(PPC440GP_UART1_ADDR, 8);
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index fd0f971881d6..28a712cd4800 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -259,7 +259,7 @@ ocotea_early_serial_map(void)
gen550_init(0, &port);
/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif
port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c
index 888c492b4a45..f6a0c6650f33 100644
--- a/arch/ppc/platforms/4xx/taishan.c
+++ b/arch/ppc/platforms/4xx/taishan.c
@@ -316,7 +316,7 @@ taishan_early_serial_map(void)
gen550_init(0, &port);
/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif
port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
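A note on the three board fixups: they pass 0 for the new argument because
the UART mapping being purged was pinned by head_44x.S during early boot,
before any user context existed; the assumption here is that such pinned
kernel entries carry TID 0, so PID 0 is the context in which the search
must run.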