path: root/arch/powerpc/kernel
author    Christophe Leroy <christophe.leroy@csgroup.eu>  2023-09-25 20:31:44 +0200
committer Michael Ellerman <mpe@ellerman.id.au>           2023-10-19 17:12:46 +1100
commit    48cf93bb168d506a8278a6fb25c2f88c1c93ce6e (patch)
tree      5243af9b8254b0bccfdd9a5a6d64d4c3a332c0ed /arch/powerpc/kernel
parent    8e9bd41e4ce1001f5b89e4c9a69f870f39d56c12 (diff)
powerpc/e500: Introduce _PAGE_READ and remove _PAGE_USER
The e500 MMU has 6 page protection bits:
- R, W, X for supervisor
- R, W, X for user

This means it can support X without R. To allow that, a _PAGE_READ flag is needed.

With a 32-bit PTE there is no bit available for it. On the other hand, the only real use of _PAGE_USER is to implement PAGE_NONE by clearing _PAGE_USER. As PAGE_NONE can also be implemented by clearing _PAGE_READ, remove _PAGE_USER and add _PAGE_READ. Move _PAGE_PRESENT into bit 30 so that _PAGE_READ can match the SR bit.

With a 64-bit PTE, _PAGE_USER is already the combination of SR and UR, so all that is needed is to rename it _PAGE_READ.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/0849ab6bf7ae2af23f94b0457fa40d0ea3983fe4.1695659959.git.christophe.leroy@csgroup.eu
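To illustrate the reasoning in the message, here is a minimal sketch of how the protection definitions can look once _PAGE_USER is gone. The flag values and the pgprot names below are illustrative assumptions for the sketch, not the contents of the real arch/powerpc headers:

    /* Illustrative only: a sketch of the idea, not the real e500 header. */
    #define _PAGE_READ      0x00001         /* read permission (matches the SR bit) */
    #define _PAGE_PRESENT   0x00002         /* PTE holds a valid translation        */
    #define _PAGE_EXEC      0x00010         /* execute permission                   */

    /* Before: PAGE_NONE was "everything except _PAGE_USER".
     * After:  PAGE_NONE is simply a present mapping with no read permission. */
    #define PAGE_NONE       __pgprot(_PAGE_PRESENT)
    #define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_READ)
    /* Execute-only (X without R) also becomes expressible. */
    #define PAGE_EXECONLY   __pgprot(_PAGE_PRESENT | _PAGE_EXEC)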
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/head_85xx.S  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 1b122cba557b..39724ff5ae1f 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -485,10 +485,10 @@ END_BTB_FLUSH_SECTION
*/
mfspr r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
- li r13,_PAGE_PRESENT
+ li r13,_PAGE_PRESENT|_PAGE_BAP_SR
oris r13,r13,_PAGE_ACCESSED@h
#else
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED
#endif
rlwimi r13,r12,11,29,29
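The hunk above is in the data TLB error path (it has just read the ESR to see whether the faulting access was a store) and builds the mask of PTE bits that the faulting access requires. A hedged C restatement follows; the function name is made up for the illustration, the _PAGE_* constants and ESR_ST (the booke "store operation" ESR bit) are the kernel's own:

    /* Sketch only: which PTE bits this data fault requires. */
    static u32 dtlb_required_perms(u32 esr)
    {
            u32 required = _PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED;
            if (esr & ESR_ST)               /* the access was a store ...          */
                    required |= _PAGE_RW;   /* ... so write permission is required */
            return required;
    }

With a 64-bit PTE the read permission is the _PAGE_BAP_SR bit rather than _PAGE_READ, which is what the first '+' line of the hunk loads.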
@@ -783,15 +783,15 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
- li r10, (_PAGE_EXEC | _PAGE_PRESENT)
+ li r10, (_PAGE_EXEC | _PAGE_READ)
mr r13, r11
rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
and r12, r11, r10
- andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
+ mcrf cr0, cr5 /* Test for user page */
slwi r10, r12, 1
or r10, r10, r12
rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
- iseleq r12, r12, r10
+ isellt r12, r10, r12
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
mtspr SPRN_MAS3, r13
#endif
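The second hunk is the 32-bit-PTE side of the MAS3 setup. A hedged C restatement of the permission computation follows. It assumes, as the slwi-by-1 suggests, that the PTE permission bits line up with the MAS3 supervisor bits (SR/SW/SX) with each user bit one position to the left, and that cr5 was set earlier in the handler according to whether the faulting address is a user address (this is what the mcrf cr0, cr5 plus isellt consume); the function name is made up for the illustration:

    /* Sketch only: the logic of the hunk, not the real implementation.
     * _PAGE_* constants, u32 and bool as in the kernel headers.         */
    static u32 mas3_low_perms(u32 pte, bool user_page)
    {
            u32 mask = _PAGE_EXEC | _PAGE_READ;     /* execute and read candidates  */
            if (pte & _PAGE_DIRTY)
                    mask |= _PAGE_RW;               /* grant write only once dirty  */

            u32 sperms = pte & mask;                /* supervisor-aligned perm bits */
            u32 uperms = (sperms << 1) | sperms;    /* mirror into user positions   */
            uperms &= ~_PAGE_EXEC;                  /* user pages get UX, never SX  */

            return user_page ? uperms : sperms;     /* the final isellt             */
    }

The behavioural change in this hunk is that the user/kernel decision no longer comes from a _PAGE_USER bit in the PTE (the removed andi.) but from the condition register set up earlier from the faulting address, and the permission mask now tests _PAGE_READ instead of _PAGE_PRESENT.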