author     Ard Biesheuvel <ardb@kernel.org>  2023-02-08 18:19:36 +0100
committer  mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>  2023-03-16 21:14:49 +0000
commit     ae2c904c3de9d42b70805e42c687de874a5c6d25 (patch)
tree       ec63761fc9f7bc004da157de6fd831da5abcb839 /ArmPkg
parent     f07a9df9af60ad0afa9107cb582f4103cdcda1bc (diff)
ArmPkg/ArmMmuLib: Avoid splitting block entries if possible
Currently, the ARM MMU page table logic will break down any block entry
that overlaps with the region being mapped, even if the block entry in
question is using the same attributes as the new region. This means that
creating a non-executable mapping inside a region that is already mapped
non-executable at a coarser granularity may trigger a call to
AllocatePages (), which may recurse back into the page table code to
update the attributes on the newly allocated page tables.

Let's avoid this, by preserving the block entry if it already covers the
region being mapped with the correct attributes.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Leif Lindholm <quic_llindhol@quicinc.com>
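As a rough illustration of the decision described above, the standalone C sketch
below compares the attributes an existing block entry already carries (ignoring
any bits being cleared) with the attributes being requested; when they match,
the coarse mapping is left alone and no page-table allocation is needed. The
ATTR_* values and the NeedsSplit() helper are hypothetical stand-ins for
illustration only, not the EDK2 definitions.

/*
 * Standalone sketch, not EDK2 code: the bit layout and the NeedsSplit()
 * helper are hypothetical and only illustrate the check the patch adds.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATTR_MASK  0xFFCULL    /* hypothetical attribute bits of a block entry */
#define ATTR_XN    0x800ULL    /* hypothetical execute-never (XN) bit          */

static bool
NeedsSplit (uint64_t BlockEntry, uint64_t AttrSetMask, uint64_t AttrClearMask)
{
  /* Compare the attributes the block entry already has (minus the bits   */
  /* being cleared) with the bits being set; equal means no split needed. */
  return (BlockEntry & ATTR_MASK & ~AttrClearMask) != AttrSetMask;
}

int
main (void)
{
  /* A block entry that is already mapped execute-never. */
  uint64_t Entry = 0x40000000ULL | ATTR_XN;

  /* Requesting XN inside that block: attributes match, so no split,   */
  /* and therefore no AllocatePages () call that could recurse back in. */
  printf ("split needed: %s\n", NeedsSplit (Entry, ATTR_XN, 0) ? "yes" : "no");
  return 0;
}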
Diffstat (limited to 'ArmPkg')
-rw-r--r--  ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c  10
-rw-r--r--  ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c    11
2 files changed, 21 insertions, 0 deletions
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
index 6d21a2e41d..1ce200c43c 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -252,6 +252,16 @@ UpdateRegionMappingRecursive (
       if (!IsTableEntry (*Entry, Level)) {
         //
+        // If the region we are trying to map is already covered by a block
+        // entry with the right attributes, don't bother splitting it up.
+        //
+        if (IsBlockEntry (*Entry, Level) &&
+            ((*Entry & TT_ATTRIBUTES_MASK & ~AttributeClearMask) == AttributeSetMask))
+        {
+          continue;
+        }
+
+        //
         // No table entry exists yet, so we need to allocate a page table
         // for the next level.
         //
diff --git a/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c
index 247cf87bf3..299d38ad07 100644
--- a/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c
+++ b/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c
@@ -170,6 +170,17 @@ UpdatePageEntries (
     // Does this descriptor need to be converted from section entry to 4K pages?
     if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE (Descriptor)) {
+      //
+      // If the section mapping covers the requested region with the expected
+      // attributes, splitting it is unnecessary, and should be avoided as it
+      // may result in unbounded recursion when using a strict NX policy.
+      //
+      if ((EntryValue & ~TT_DESCRIPTOR_PAGE_TYPE_MASK & EntryMask) ==
+          (ConvertSectionAttributesToPageAttributes (Descriptor) & EntryMask))
+      {
+        continue;
+      }
+
       Status = ConvertSectionToPages (FirstLevelIdx << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
       if (EFI_ERROR (Status)) {
         // Exit for loop
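For the 32-bit path above, the same decision is made by translating the
attributes of the existing 1 MB section descriptor into page-descriptor form
and comparing them, under the mask of bits being updated, with the page
attributes about to be applied. The sketch below models that comparison with a
made-up bit layout; ConvertAttrs() and the *_XN constants are hypothetical
stand-ins, not the real ARM short-descriptor encodings or the EDK2
ConvertSectionAttributesToPageAttributes () implementation.

/*
 * Standalone sketch with a hypothetical bit layout: section and page
 * descriptors keep equivalent attributes in different bit positions, so the
 * section attributes are translated to page form before the comparison.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEC_XN   0x10U   /* hypothetical XN bit in a section descriptor */
#define PAGE_XN  0x01U   /* hypothetical XN bit in a page descriptor    */

/* Hypothetical stand-in for a section-to-page attribute translation. */
static uint32_t
ConvertAttrs (uint32_t SectionDescriptor)
{
  return (SectionDescriptor & SEC_XN) ? PAGE_XN : 0;
}

int
main (void)
{
  uint32_t SectionDescriptor = 0x00100000U | SEC_XN; /* section, already XN */
  uint32_t NewPageAttrs      = PAGE_XN;              /* attrs being applied */
  uint32_t UpdateMask        = PAGE_XN;              /* bits being updated  */

  /* If the section already carries the requested attributes, it does   */
  /* not have to be broken up into a second-level page table.           */
  bool NeedsConversion =
    (NewPageAttrs & UpdateMask) != (ConvertAttrs (SectionDescriptor) & UpdateMask);

  printf ("convert section to pages: %s\n", NeedsConversion ? "yes" : "no");
  return 0;
}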