author    Jian J Wang <jian.j.wang@intel.com>  2018-10-24 11:21:04 +0800
committer Jian J Wang <jian.j.wang@intel.com>  2018-10-26 10:30:34 +0800
commit    54efcfea8120d9a0595d0db6cd5c1dddd8dd61e1 (patch)
tree      f644b5c7e70559a9db652834855192bfe91446df /UefiCpuPkg
parent    02ba936dfd6cdf670752ca3cb46fd0cdbf52d85c (diff)
UefiCpuPkg/CpuDxe: prevent recursive calling of InitializePageTablePool
The freed-memory guard feature causes a recursive call of
InitializePageTablePool(). This is due to the fact that
AllocateAlignedPages() is used to allocate page table pool memory.
That function will most likely call gBS->FreePages to free unaligned
pages, which triggers another round of page attribute changes, as
shown below (freed pages are always marked not-present if the
freed-memory guard is enabled):

   FreePages() <===============|
=> CpuSetMemoryAttributes()    |
=> <if out of page table>      |
=> InitializePageTablePool()   |
=> AllocateAlignedPages()      |
=> FreePages() ================|

The solution is to add a global variable as a lock in the page table
pool allocation function and fail any further request until the
current one has completed.

Since this issue only happens if the freed-memory guard is enabled,
it won't affect CpuSetMemoryAttributes() in a default build of a BIOS.
If the freed-memory guard is enabled, it only affects the pages
(which fail to be marked as not-present) freed in
AllocateAlignedPages(). Since those freed pages haven't been used yet
(their addresses are not yet exposed to code outside
AllocateAlignedPages), this does not compromise the freed-memory
guard feature.

This change only fails the CpuSetMemoryAttributes() called from
FreePages(); it does not fail the FreePages() itself, so the error
status is not propagated to upper layers of code.

Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Ruiyu Ni <ruiyu.ni@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Jian J Wang <jian.j.wang@intel.com>
Reviewed-by: Eric Dong <eric.dong@intel.com>
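As context for the diff below, here is a minimal standalone sketch of the
re-entrancy guard pattern the patch applies. The names (PoolLocked,
GrowPool, AllocPages) and the malloc-based stub are illustrative
assumptions, not edk2 code; the actual change to InitializePageTablePool()
follows in the diff.

    #include <stdbool.h>
    #include <stdlib.h>

    static bool PoolLocked = false;   /* set while the pool is being grown */

    /* Stand-in allocator; in the driver this role is played by
       AllocateAlignedPages(), whose freeing of unaligned pages can
       re-enter the pool-growth path through an attribute change. */
    static void *
    AllocPages (size_t Pages)
    {
      return malloc (Pages * 4096);
    }

    static bool
    GrowPool (size_t Pages)
    {
      bool  Grown;

      /* Refuse nested calls instead of recursing. */
      if (PoolLocked) {
        return false;
      }

      PoolLocked = true;
      Grown      = false;

      void *Buffer = AllocPages (Pages);
      if (Buffer != NULL) {
        /* ... link Buffer into the pool ... */
        Grown = true;
      }

      PoolLocked = false;
      return Grown;
    }

    int
    main (void)
    {
      return GrowPool (512) ? 0 : 1;
    }

The re-entered call simply reports failure; the outer call finishes
growing the pool and clears the lock, which matches how the patch lets
FreePages() succeed even though the nested attribute change is rejected.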
Diffstat (limited to 'UefiCpuPkg')
-rw-r--r--  UefiCpuPkg/CpuDxe/CpuPageTable.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/UefiCpuPkg/CpuDxe/CpuPageTable.c b/UefiCpuPkg/CpuDxe/CpuPageTable.c
index 33e8ee2d2c..4bee8c7772 100644
--- a/UefiCpuPkg/CpuDxe/CpuPageTable.c
+++ b/UefiCpuPkg/CpuDxe/CpuPageTable.c
@@ -100,6 +100,7 @@ PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
};
PAGE_TABLE_POOL *mPageTablePool = NULL;
+BOOLEAN mPageTablePoolLock = FALSE;
PAGE_TABLE_LIB_PAGING_CONTEXT mPagingContext;
EFI_SMM_BASE2_PROTOCOL *mSmmBase2 = NULL;
@@ -1047,6 +1048,16 @@ InitializePageTablePool (
BOOLEAN IsModified;
//
+ // Do not allow re-entrance.
+ //
+ if (mPageTablePoolLock) {
+ return FALSE;
+ }
+
+ mPageTablePoolLock = TRUE;
+ IsModified = FALSE;
+
+ //
// Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
// header.
//
@@ -1056,9 +1067,15 @@ InitializePageTablePool (
Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
if (Buffer == NULL) {
DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
- return FALSE;
+ goto Done;
}
+ DEBUG ((
+ DEBUG_INFO,
+ "Paging: added %lu pages to page table pool\r\n",
+ (UINT64)PoolPages
+ ));
+
//
// Link all pools into a list for easier track later.
//
@@ -1092,7 +1109,9 @@ InitializePageTablePool (
);
ASSERT (IsModified == TRUE);
- return TRUE;
+Done:
+ mPageTablePoolLock = FALSE;
+ return IsModified;
}
/**