author     Ard Biesheuvel <ard.biesheuvel@linaro.org>  2016-04-11 15:47:24 +0200
committer  Ard Biesheuvel <ard.biesheuvel@linaro.org>  2016-04-14 18:01:52 +0200
commit     61b02ba1f2a3f80fa06f5006f0aea1572093a067 (patch)
tree       5ec0390876e68b78879c59e5492b8247b8ba45db /ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
parent     d354963f0dc350771167fa5d3c28b9de8d632d9c (diff)
ArmPkg/AArch64Mmu: disable MMU during page table manipulations
On ARM, manipulating live page tables is cumbersome since the architecture mandates the use of break-before-make, i.e., replacing a block entry with a table entry requires an intermediate step via an invalid entry, or TLB conflicts may occur.

Since it is not generally feasible to decide in the page table manipulation routines whether such an invalid entry will result in those routines themselves becoming unavailable, use a function that is callable with the MMU off (i.e., a leaf function that does not access the stack) to perform the change of a block entry into a table entry.

Note that the opposite should never occur, i.e., table entries are never coalesced into block entries.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
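For context, a minimal C sketch (not part of this patch) of how a page-table routine might use the new primitive when splitting a block entry. Only the ArmReplaceLiveTranslationEntry prototype mirrors the comment block added in the patch below; the helper name SplitBlockEntry and the TT_TYPE_TABLE_ENTRY descriptor bits are illustrative assumptions.

    #include <stdint.h>

    // Prototype mirrors the comment block added in AArch64Support.S below
    // (EDK2's UINT64 corresponds to uint64_t here).
    extern void ArmReplaceLiveTranslationEntry (uint64_t *Entry, uint64_t Value);

    // Illustrative table-descriptor type bits; the real encodings live
    // elsewhere in ArmPkg.
    #define TT_TYPE_TABLE_ENTRY  0x3ULL

    // Hypothetical caller: replace a live block entry with a table entry
    // that points at a freshly populated next-level translation table.
    static void
    SplitBlockEntry (uint64_t *LiveBlockEntry, uint64_t *NewTable)
    {
      // Build the replacement descriptor while the MMU is still on; the new
      // table is not yet reachable through any live translation table walk.
      uint64_t TableDescriptor = (uint64_t)(uintptr_t)NewTable | TT_TYPE_TABLE_ENTRY;

      // The leaf routine disables interrupts and the MMU around the store,
      // so the update can never race a live translation table walk.
      ArmReplaceLiveTranslationEntry (LiveBlockEntry, TableDescriptor);
    }

In the patch itself, the swap is implemented by briefly clearing SCTLR_ELx.M around the store, followed by cache maintenance and a TLB flush, as shown in the added assembly below.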
Diffstat (limited to 'ArmPkg/Library/ArmLib/AArch64/AArch64Support.S')
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Support.S  |  62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
index 1a3023b794..43f7a795ac 100644
--- a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
@@ -56,6 +56,8 @@ GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)
+GCC_ASM_EXPORT (ArmReplaceLiveTranslationEntry)
+GCC_ASM_EXPORT (ArmReplaceLiveTranslationEntrySize)

.set CTRL_M_BIT, (1 << 0)
.set CTRL_A_BIT, (1 << 1)
@@ -481,4 +483,64 @@ ASM_PFX(ArmReadCurrentEL):
mrs x0, CurrentEL
ret

+
+ .macro __replace_entry, el
+
+ // disable the MMU
+ mrs x8, sctlr_el\el
+ bic x9, x8, #CTRL_M_BIT
+ msr sctlr_el\el, x9
+ isb
+
+ // write updated entry
+ str x1, [x0]
+
+ // invalidate again to get rid of stale clean cachelines that may
+ // have been filled speculatively since the last invalidate
+ dmb sy
+ dc ivac, x0
+
+ // flush the TLBs
+ .if \el == 1
+ tlbi vmalle1
+ .else
+ tlbi alle\el
+ .endif
+ dsb sy
+
+ // re-enable the MMU
+ msr sctlr_el\el, x8
+ isb
+ .endm
+
+//VOID
+//ArmReplaceLiveTranslationEntry (
+// IN UINT64 *Entry,
+// IN UINT64 Value
+// )
+ASM_PFX(ArmReplaceLiveTranslationEntry):
+
+ // disable interrupts
+ mrs x2, daif
+ msr daifset, #0xf
+ isb
+
+ // clean and invalidate first so that we don't clobber
+ // adjacent entries that are dirty in the caches
+ dc civac, x0
+ dsb ish
+
+ EL1_OR_EL2_OR_EL3(x3)
+1:__replace_entry 1
+ b 4f
+2:__replace_entry 2
+ b 4f
+3:__replace_entry 3
+
+4:msr daif, x2
+ ret
+
+ASM_PFX(ArmReplaceLiveTranslationEntrySize):
+ .long . - ArmReplaceLiveTranslationEntry
+
ASM_FUNCTION_REMOVE_IF_UNREFERENCED
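The trailing ArmReplaceLiveTranslationEntrySize label marks a 32-bit word holding the routine's length in bytes (. minus the entry point). A minimal consumer-side sketch follows; the extern declarations and the single-page fit check are illustrative assumptions, not something this patch itself contains.

    #include <stdbool.h>
    #include <stdint.h>

    // Assumed consumer-side declarations (illustrative only): the .long
    // emitted after the routine holds its size in bytes.
    extern uint32_t ArmReplaceLiveTranslationEntrySize;
    extern void     ArmReplaceLiveTranslationEntry (uint64_t *Entry, uint64_t Value);

    // Illustrative check: a caller might want to confirm the routine does
    // not straddle a 4 KB boundary, e.g., so that a single translation
    // entry can cover all of its code while it runs.
    static bool
    ReplaceEntryRoutineFitsInOnePage (void)
    {
      uintptr_t Base = (uintptr_t)ArmReplaceLiveTranslationEntry;

      return ((Base & 0xFFF) + ArmReplaceLiveTranslationEntrySize) <= 0x1000;
    }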