author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2007-10-29 12:05:18 +1100
---|---|---
committer | Paul Mackerras <paulus@samba.org> | 2007-10-29 14:34:14 +1100
commit | f6ab0b922c3423b88c0e6e3e2c5fc1e58d83055d |
tree | fd1eedbaed6a5d837f319a6ba9010fbba80560da |
parent | 2a397e82c7db18019e408f953dd58dc1963a328c |
[POWERPC] powerpc: Fix demotion of segments to 4K pages
When demoting a process to use 4K HW pages (instead of 64K), which
happens under various circumstances such as doing cache inhibited
mappings on machines that do not support 64K CI pages, the assembly
hash code calls back into the C function flush_hash_page(). This
function prototype was recently changed to accommodate 1T segments
but the assembly call site was not updated, causing applications that
do demotion to hang. In addition, when updating the per-CPU PACA for
the new sizes, we didn't properly update the slice "map", thus causing
the SLB miss code to re-insert segments for the wrong size.
This fixes both and adds a warning comment next to the C
implementation to try to avoid problems next time someone changes it.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
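
To see why the stale call site hangs, here is a minimal C sketch of the mismatch (the types and values are simplified stand-ins, not the kernel's real ones): the assembly caller still passes arguments for the old five-parameter prototype, so the value meant for `local` lands in the new `ssize` slot and `local` itself receives whatever happens to be in the next argument register.

```c
#include <stdio.h>

/* New prototype, as changed for 1T segments: ssize was inserted
 * before local. unsigned long stands in for real_pte_t here. */
static void flush_hash_page(unsigned long va, unsigned long pte,
                            int psize, int ssize, int local)
{
	printf("va=%#lx psize=%d ssize=%d local=%d\n",
	       va, psize, ssize, local);
	(void)pte;
}

int main(void)
{
	int local = 1, garbage = 0xdead;

	/* Correct call after the fix: ssize is passed explicitly. */
	flush_hash_page(0x1000, 0, /*psize=*/2, /*ssize=*/0, local);

	/* What the un-updated assembly call site amounted to: the
	 * argument registers shift, so 'local' arrives as 'ssize'
	 * and 'local' is effectively uninitialized. */
	flush_hash_page(0x1000, 0, /*psize=*/2, /*ssize=*/local, garbage);
	return 0;
}
```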
-rw-r--r-- | arch/powerpc/mm/hash_low_64.S | 5
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 6
2 files changed, 7 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index ad253b959030..e935edd6b72b 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -331,7 +331,7 @@ htab_pte_insert_failure:
  *****************************************************************************/
 
 /* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, int local)
+ *		 pte_t *ptep, unsigned long trap, int local, int ssize)
  */
 
 /*
@@ -557,7 +557,8 @@ htab_inval_old_hpte:
 	mr	r4,r31			/* PTE.pte */
 	li	r5,0			/* PTE.hidx */
 	li	r6,MMU_PAGE_64K		/* psize */
-	ld	r7,STK_PARM(r8)(r1)	/* local */
+	ld	r7,STK_PARM(r9)(r1)	/* ssize */
+	ld	r8,STK_PARM(r8)(r1)	/* local */
 	bl	.flush_hash_page
 	b	htab_insert_pte
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c78dc912411f..c5a603fdb22d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -791,8 +791,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 	if (user_region) {
 		if (psize != get_paca()->context.user_psize) {
-			get_paca()->context.user_psize =
-				mm->context.user_psize;
+			get_paca()->context = mm->context;
 			slb_flush_and_rebolt();
 		}
 	} else if (get_paca()->vmalloc_sllp !=
@@ -885,6 +884,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	local_irq_restore(flags);
 }
 
+/* WARNING: This is called from hash_low_64.S, if you change this prototype,
+ * do not forget to update the assembly call site !
+ */
 void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
 		     int local)
 {
```
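
The hash_utils_64.c hunk replaces the single-field copy with a full structure assignment. A sketch of why that matters, with field names that only loosely follow the kernel's mm_context_t of that era (this is an illustration, not kernel code): copying only user_psize leaves the per-CPU copy of the slice map stale, so the SLB miss path, which reads the slice map from the PACA, keeps inserting segments with the old size.

```c
#include <stdio.h>

/* Simplified stand-in for the powerpc mm_context_t: user_psize is the
 * default page-size index, and the slice field encodes per-address-range
 * page sizes that the SLB miss code consults. */
struct mm_context {
	unsigned short user_psize;
	unsigned long long low_slices_psize;
};

struct paca { struct mm_context context; };

int main(void)
{
	/* Process demoted to 4K: its context already reflects that. */
	struct mm_context mm_ctx = { .user_psize = 0 /* 4K */,
	                             .low_slices_psize = 0x0 };
	struct paca paca = { .context = { .user_psize = 4 /* 64K */,
	                                  .low_slices_psize = 0x4444 } };

	/* Buggy update: only user_psize is refreshed; the slice map in
	 * the PACA still says 64K, so SLB misses re-insert 64K segments. */
	paca.context.user_psize = mm_ctx.user_psize;
	printf("buggy: psize=%hu slices=%#llx (stale)\n",
	       paca.context.user_psize, paca.context.low_slices_psize);

	/* Fixed update, as in the patch: copy the whole context so the
	 * slice map travels with the page-size change. */
	paca.context = mm_ctx;
	printf("fixed: psize=%hu slices=%#llx\n",
	       paca.context.user_psize, paca.context.low_slices_psize);
	return 0;
}
```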