author     Ard Biesheuvel <ard.biesheuvel@linaro.org>  2019-01-07 08:15:01 +0100
committer  Ard Biesheuvel <ard.biesheuvel@linaro.org>  2019-01-29 11:24:02 +0100
commit     d5788777bcc75936cc0e6acb540a5ee6ac77866b (patch)
tree       3662febef191bf6e83feb297e1462169e03555c5 /ArmPkg/Library/ArmMmuLib
parent     f34b38fae614c096d9c6afdc02b8448ff38134cd (diff)
ArmPkg/ArmMmuLib AARCH64: get rid of needless TLB invalidation
Currently, we always invalidate the TLBs entirely after making any modification to the page tables. Now that we have introduced strict memory permissions in quite a number of places, such modifications occur much more often, and it is better for performance to flush only those TLB entries that are actually affected by the changes.

At the same time, relax some system-wide data synchronization barriers to non-shareable. When running in UEFI, we don't share virtual address translations with other masters, unless we are running under virt, but in that case, the host will upgrade them as appropriate (by setting an override at EL2).

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Leif Lindholm <leif.lindholm@linaro.org>
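In concrete terms, the change replaces a full TLB flush with an invalidation scoped to the virtual address whose descriptor changed. A minimal sketch of the two strategies, as hypothetical EL1-only helpers in GCC inline assembly (the names are illustrative, not edk2 code):

#include <stdint.h>

// Old behaviour: discard every EL1 translation, then synchronize
// system-wide. Correct, but needlessly expensive when only one
// descriptor changed.
static inline void
TlbInvalidateAllEl1 (void)
{
  __asm__ __volatile__ ("tlbi vmalle1\n\tdsb sy\n\tisb" ::: "memory");
}

// New behaviour: invalidate only the entry covering Va. TLBI VAAE1
// takes VA[55:12] in Xt[43:0], hence the shift by 12. DSB NSH suffices
// because no other master shares these translations.
static inline void
TlbInvalidateVaEl1 (uint64_t Va)
{
  __asm__ __volatile__ ("tlbi vaae1, %0\n\tdsb nsh\n\tisb"
                        : : "r" (Va >> 12) : "memory");
}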
Diffstat (limited to 'ArmPkg/Library/ArmMmuLib')
-rw-r--r--  ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c          16
-rw-r--r--  ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S  18
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
index d66df3e17a..3498f520e3 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -129,13 +129,14 @@ STATIC
 VOID
 ReplaceLiveEntry (
   IN UINT64 *Entry,
-  IN UINT64 Value
+  IN UINT64 Value,
+  IN UINT64 RegionStart
   )
 {
   if (!ArmMmuEnabled ()) {
     *Entry = Value;
   } else {
-    ArmReplaceLiveTranslationEntry (Entry, Value);
+    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
   }
 }
@@ -296,7 +297,8 @@ GetBlockEntryListFromAddress (
 
         // Fill the BlockEntry with the new TranslationTable
         ReplaceLiveEntry (BlockEntry,
-          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
+          (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
+          RegionStart);
       }
     } else {
       if (IndexLevel != PageLevel) {
@@ -375,6 +377,8 @@ UpdateRegionMapping (
     *BlockEntry &= BlockEntryMask;
     *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
 
+    ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
+
     // Go to the next BlockEntry
     RegionStart += BlockEntrySize;
     RegionLength -= BlockEntrySize;
@@ -487,9 +491,6 @@ ArmSetMemoryAttributes (
     return Status;
   }
 
-  // Invalidate all TLB entries so changes are synced
-  ArmInvalidateTlb ();
-
   return EFI_SUCCESS;
 }
@@ -512,9 +513,6 @@ SetMemoryRegionAttribute (
     return Status;
   }
 
-  // Invalidate all TLB entries so changes are synced
-  ArmInvalidateTlb ();
-
   return EFI_SUCCESS;
 }
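The C-side hunks above all follow one pattern: rewrite the descriptor in place, then flush only the VA it maps. A self-contained sketch of that pattern, with the ArmLib call replaced by a hypothetical EL1-only inline-assembly equivalent (the mask and attribute parameters are placeholders, not the edk2 macros):

#include <stdint.h>

static void
WriteDescriptorAndFlush (
  volatile uint64_t  *BlockEntry,  // descriptor being rewritten in place
  uint64_t           EntryMask,    // bits to preserve (stands in for BlockEntryMask)
  uint64_t           NewBits,      // address, attribute and type bits to set
  uint64_t           RegionStart   // VA covered by this descriptor
  )
{
  *BlockEntry = (*BlockEntry & EntryMask) | NewBits;
  __asm__ __volatile__ (
    "dsb  nshst\n\t"               // make the store visible to the table walker
    "tlbi vaae1, %0\n\t"           // invalidate only this VA's translations
    "dsb  nsh\n\t"
    "isb"
    : : "r" (RegionStart >> 12) : "memory");
}

Because every modified descriptor is flushed on the spot, the blanket ArmInvalidateTlb () that used to run at the end of ArmSetMemoryAttributes and SetMemoryRegionAttribute no longer adds anything, which is why both calls are deleted.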
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
index 90192df24f..8b447e3d66 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
@@ -32,13 +32,14 @@
   dmb   sy
   dc    ivac, x0
 
-  // flush the TLBs
+  // flush translations for the target address from the TLBs
+  lsr   x2, x2, #12
   .if   \el == 1
-  tlbi  vmalle1
+  tlbi  vaae1, x2
   .else
-  tlbi  alle\el
+  tlbi  vae\el, x2
   .endif
-  dsb   sy
+  dsb   nsh
 
   // re-enable the MMU
   msr   sctlr_el\el, x8
@@ -48,19 +49,20 @@
 //VOID
 //ArmReplaceLiveTranslationEntry (
 //  IN UINT64 *Entry,
-//  IN UINT64 Value
+//  IN UINT64 Value,
+//  IN UINT64 Address
 //  )
 ASM_FUNC(ArmReplaceLiveTranslationEntry)
 
   // disable interrupts
-  mrs   x2, daif
+  mrs   x4, daif
   msr   daifset, #0xf
   isb
 
   // clean and invalidate first so that we don't clobber
   // adjacent entries that are dirty in the caches
   dc    civac, x0
-  dsb   ish
+  dsb   nsh
 
   EL1_OR_EL2_OR_EL3(x3)
 1:__replace_entry 1
@@ -69,7 +71,7 @@ ASM_FUNC(ArmReplaceLiveTranslationEntry)
   b     4f
 3:__replace_entry 3
 
-4:msr   daif, x2
+4:msr   daif, x4
   ret
 
 ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)
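A note on the register shuffle: under AAPCS64 the new third parameter (Address) arrives in x2, and x3 is already used as scratch by EL1_OR_EL2_OR_EL3, so the saved DAIF state moves to x4. The sequence inside __replace_entry roughly corresponds to the following hedged, EL1-only C illustration (names are hypothetical; the real code runs from assembly with interrupts masked, since C execution cannot be guaranteed safe while the MMU is off):

#include <stdint.h>

static void
ReplaceLiveEntryEl1Sketch (
  volatile uint64_t  *Entry,  // x0: descriptor to replace
  uint64_t           Value,   // x1: new descriptor value
  uint64_t           Va       // x2: VA covered by the descriptor
  )
{
  uint64_t  Sctlr;

  // disable the MMU (clear SCTLR_EL1.M) so the walker cannot observe a
  // half-updated descriptor
  __asm__ __volatile__ ("mrs %0, sctlr_el1" : "=r" (Sctlr));
  __asm__ __volatile__ ("msr sctlr_el1, %0\n\tisb"
                        : : "r" (Sctlr & ~1ULL) : "memory");

  *Entry = Value;             // write the updated entry

  __asm__ __volatile__ (
    "dmb  sy\n\t"
    "dc   ivac, %0\n\t"       // invalidate the entry's cache line
    "tlbi vaae1, %1\n\t"      // flush only this VA, not the whole TLB
    "dsb  nsh\n\t"            // non-shareable: no other masters involved
    "isb"
    : : "r" (Entry), "r" (Va >> 12) : "memory");

  // re-enable the MMU
  __asm__ __volatile__ ("msr sctlr_el1, %0\n\tisb" : : "r" (Sctlr) : "memory");
}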