author     Ray Ni <ray.ni@intel.com>   2019-09-06 06:18:47 +0800
committer  Ray Ni <ray.ni@intel.com>   2019-09-13 16:20:54 +0800
commit     86ad762fa7a51cbf94e34e732961aae3de3339c3 (patch)
tree       d7572c31274c1c4170de2774b73488b9adde029d /UefiCpuPkg/PiSmmCpuDxeSmm
parent     5a9db858806912ebd4e836aaa607ef6d87ce9c0d (diff)
UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr line > 48
Today's behavior is to enable 5-level paging when the CPU supports it
(CPUID[7,0].ECX.BIT[16] is set).

This patch changes the behavior to enable 5-level paging only when two
conditions are both met:
1. The CPU supports it;
2. The max physical address bits is bigger than 48.

Because 4-level paging can address physical addresses up to 2^48 - 1,
there is no need to enable 5-level paging when the max physical address
bits is <= 48.

Signed-off-by: Ray Ni <ray.ni@intel.com>
Cc: Liming Gao <liming.gao@intel.com>
Reviewed-by: Eric Dong <eric.dong@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
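The condition the patch introduces can be summarized in isolation as below. This is a minimal, hypothetical sketch rather than the patch code itself: the function name is made up, the header path is assumed, and it reuses the same BaseLib CPUID helpers and Register definitions that the diff itself uses. Note that the actual patch ASSERTs the CPUID capability bit instead of folding it into the return value; the sketch only shows the combined condition described above.

#include <Base.h>
#include <Library/BaseLib.h>
#include <Register/Intel/Cpuid.h>   // header path assumed; definitions match those used in the diff

//
// Hypothetical standalone sketch of the new gating logic:
// enable 5-level paging only when the CPU reports support for it
// (CPUID[7,0].ECX.BIT[16]) AND the physical address width exceeds 48 bits.
//
BOOLEAN
SketchIs5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  //
  // Physical address width; default to 36 bits when leaf 0x80000008
  // is not implemented, mirroring the patch.
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  //
  // 5-level paging capability: CPUID leaf 7, sub-leaf 0, ECX bit 16.
  //
  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL, NULL, &ExtFeatureEcx.Uint32, NULL
    );

  //
  // 4-level paging already covers 48 bits, so the extra paging level is
  // only needed when the address width goes beyond that.
  //
  return (BOOLEAN)((ExtFeatureEcx.Bits.FiveLevelPage != 0) &&
                   (VirPhyAddressSize.Bits.PhysicalAddressBits > 48));
}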
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm')
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c       57
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm    4
2 files changed, 39 insertions(+), 22 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 733d107efd..e5c4788c13 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -16,8 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE;
BOOLEAN mCpuSmmRestrictedMemoryAccess;
-BOOLEAN m5LevelPagingSupport;
-X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;
+BOOLEAN m5LevelPagingNeeded;
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
/**
Disable CET.
@@ -63,28 +63,45 @@ Is1GPageSupport (
}
/**
- Check if 5-level paging is supported by processor or not.
-
- @retval TRUE 5-level paging is supported.
- @retval FALSE 5-level paging is not supported.
+ The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
+ the max physical address bits is bigger than 48. Because 4-level paging can support
+ to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
+ with max physical address bits <= 48.
+ @retval TRUE 5-level paging enabling is needed.
+ @retval FALSE 5-level paging enabling is not needed.
**/
BOOLEAN
-Is5LevelPagingSupport (
+Is5LevelPagingNeeded (
VOID
)
{
- CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
+ UINT32 MaxExtendedFunctionId;
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
+ if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
+ } else {
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
+ }
AsmCpuidEx (
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
- NULL,
- NULL,
- &EcxFlags.Uint32,
- NULL
+ NULL, NULL, &ExtFeatureEcx.Uint32, NULL
);
- return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
+ DEBUG ((
+ DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
+ VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
+ ));
+
+ if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
+ ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
+ return TRUE;
+ } else {
+ return FALSE;
+ }
}
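The threshold in the new check is written as 4 * 9 + 12, i.e. the address width that 4-level paging can cover. A small, hypothetical helper (not part of the patch) spelling out that arithmetic:

#include <Base.h>

//
// Hypothetical helper, only to make the "4 * 9 + 12" constant explicit:
// every paging level decodes 9 address bits (512 entries per 4KB table)
// and the page offset contributes another 12 bits.
//
#define PAGE_OFFSET_BITS       12
#define BITS_PER_PAGING_LEVEL  9

STATIC
UINTN
AddressBitsCoveredByPaging (
  IN UINTN  PagingLevels
  )
{
  return PagingLevels * BITS_PER_PAGING_LEVEL + PAGE_OFFSET_BITS;
}

//
// AddressBitsCoveredByPaging (4) == 48  ->  4-level paging reaches 2^48 - 1
// AddressBitsCoveredByPaging (5) == 57  ->  5-level paging reaches 2^57 - 1
//

Since the SMM page tables map memory 1:1, 48 bits of coverage also bounds the physical addresses they can reach; that is why the routine above expects (ASSERTs) that a CPU reporting more than 48 physical address bits also implements 5-level paging.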
/**
@@ -190,7 +207,7 @@ SetStaticPageTable (
// when 5-Level Paging is disabled.
//
ASSERT (mPhysicalAddressBits <= 52);
- if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
+ if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
mPhysicalAddressBits = 48;
}
@@ -217,7 +234,7 @@ SetStaticPageTable (
PageMapLevel4Entry = PageMap;
PageMapLevel5Entry = NULL;
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
//
// By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
//
@@ -233,7 +250,7 @@ SetStaticPageTable (
// So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
// When 5-Level Paging is disabled, below allocation happens only once.
//
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
if (PageMapLevel4Entry == NULL) {
PageMapLevel4Entry = AllocatePageTableMemory (1);
@@ -336,10 +353,10 @@ SmmInitPageTable (
mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
m1GPageTableSupport = Is1GPageSupport ();
- m5LevelPagingSupport = Is5LevelPagingSupport ();
+ m5LevelPagingNeeded = Is5LevelPagingNeeded ();
mPhysicalAddressBits = CalculateMaximumSupportAddress ();
- PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
- DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
+ PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
+ DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
@@ -370,7 +387,7 @@ SmmInitPageTable (
SetSubEntriesNum (Pml4Entry, 3);
PTEntry = Pml4Entry;
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
//
// Fill PML5 entry
//
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
index 271492a9d7..db06d22d51 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -69,7 +69,7 @@ extern ASM_PFX(mXdSupported)
global ASM_PFX(gPatchXdSupported)
global ASM_PFX(gPatchSmiStack)
global ASM_PFX(gPatchSmiCr3)
-global ASM_PFX(gPatch5LevelPagingSupport)
+global ASM_PFX(gPatch5LevelPagingNeeded)
global ASM_PFX(gcSmiHandlerTemplate)
global ASM_PFX(gcSmiHandlerSize)
@@ -127,7 +127,7 @@ ASM_PFX(gPatchSmiCr3):
mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
mov cl, strict byte 0 ; source operand will be patched
-ASM_PFX(gPatch5LevelPagingSupport):
+ASM_PFX(gPatch5LevelPagingNeeded):
cmp cl, 0
je SkipEnable5LevelPaging
;
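On the assembly side, the renamed gPatch5LevelPagingNeeded label marks the end of the byte-sized immediate in "mov cl, strict byte 0", and SmmInitPageTable () (PageTbl.c hunk above) writes the computed boolean into that immediate through BaseLib's PatchInstructionX86 (), so the SMI entry stub can decide whether to enable 5-level paging with a plain "cmp cl, 0" rather than loading a global variable. A minimal, hypothetical sketch of the C side; the wrapper function and the extern declarations exist only for this sketch, while the real definitions and the real call live in PageTbl.c:

#include <Base.h>
#include <Library/BaseLib.h>

extern X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;   // label in SmiEntry.nasm
extern BOOLEAN                   m5LevelPagingNeeded;        // computed by Is5LevelPagingNeeded ()

VOID
SketchPatchSmiEntryFor5LevelPaging (
  VOID
  )
{
  //
  // Write the 1-byte boolean into the "mov cl, strict byte 0" immediate
  // that ends at the gPatch5LevelPagingNeeded label.
  //
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
}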