Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c | 601
1 file changed, 326 insertions(+), 275 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 211a78b1c4..538394f239 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -10,16 +10,16 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
#include "PiSmmCpuDxeSmm.h"
-#define PAGE_TABLE_PAGES 8
-#define ACC_MAX_BIT BIT3
+#define PAGE_TABLE_PAGES 8
+#define ACC_MAX_BIT BIT3
-extern UINTN mSmmShadowStackSize;
+extern UINTN mSmmShadowStackSize;
-LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
-BOOLEAN m1GPageTableSupport = FALSE;
-BOOLEAN mCpuSmmRestrictedMemoryAccess;
-BOOLEAN m5LevelPagingNeeded;
-X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
+BOOLEAN m1GPageTableSupport = FALSE;
+BOOLEAN mCpuSmmRestrictedMemoryAccess;
+BOOLEAN m5LevelPagingNeeded;
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
/**
Disable CET.
@@ -51,8 +51,8 @@ Is1GPageSupport (
VOID
)
{
- UINT32 RegEax;
- UINT32 RegEdx;
+ UINT32 RegEax;
+ UINT32 RegEdx;
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
if (RegEax >= 0x80000001) {
@@ -61,6 +61,7 @@ Is1GPageSupport (
return TRUE;
}
}
+
return FALSE;
}
@@ -78,9 +79,9 @@ Is5LevelPagingNeeded (
VOID
)
{
- CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
- CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
- UINT32 MaxExtendedFunctionId;
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
+ UINT32 MaxExtendedFunctionId;
AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
@@ -88,14 +89,20 @@ Is5LevelPagingNeeded (
} else {
VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
}
+
AsmCpuidEx (
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
- NULL, NULL, &ExtFeatureEcx.Uint32, NULL
+ NULL,
+ NULL,
+ &ExtFeatureEcx.Uint32,
+ NULL
);
DEBUG ((
- DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
- VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
+ DEBUG_INFO,
+ "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
+ VirPhyAddressSize.Bits.PhysicalAddressBits,
+ ExtFeatureEcx.Bits.FiveLevelPage
));
if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
@@ -114,18 +121,19 @@ Is5LevelPagingNeeded (
**/
VOID
GetPageTable (
- OUT UINTN *Base,
- OUT BOOLEAN *FiveLevels OPTIONAL
+ OUT UINTN *Base,
+ OUT BOOLEAN *FiveLevels OPTIONAL
)
{
- IA32_CR4 Cr4;
+ IA32_CR4 Cr4;
if (mInternalCr3 == 0) {
*Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
if (FiveLevels != NULL) {
- Cr4.UintN = AsmReadCr4 ();
+ Cr4.UintN = AsmReadCr4 ();
*FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
}
+
return;
}
@@ -146,8 +154,8 @@ GetPageTable (
**/
VOID
SetSubEntriesNum (
- IN OUT UINT64 *Entry,
- IN UINT64 SubEntryNum
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
)
{
//
@@ -167,7 +175,7 @@ SetSubEntriesNum (
**/
UINT64
GetSubEntriesNum (
- IN UINT64 *Entry
+ IN UINT64 *Entry
)
{
//
@@ -186,25 +194,26 @@ CalculateMaximumSupportAddress (
VOID
)
{
- UINT32 RegEax;
- UINT8 PhysicalAddressBits;
- VOID *Hob;
+ UINT32 RegEax;
+ UINT8 PhysicalAddressBits;
+ VOID *Hob;
//
// Get physical address bits supported.
//
Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
if (Hob != NULL) {
- PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+ PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
} else {
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
if (RegEax >= 0x80000008) {
AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
- PhysicalAddressBits = (UINT8) RegEax;
+ PhysicalAddressBits = (UINT8)RegEax;
} else {
PhysicalAddressBits = 36;
}
}
+
return PhysicalAddressBits;
}
@@ -216,54 +225,54 @@ CalculateMaximumSupportAddress (
**/
VOID
SetStaticPageTable (
- IN UINTN PageTable,
- IN UINT8 PhysicalAddressBits
+ IN UINTN PageTable,
+ IN UINT8 PhysicalAddressBits
)
{
- UINT64 PageAddress;
- UINTN NumberOfPml5EntriesNeeded;
- UINTN NumberOfPml4EntriesNeeded;
- UINTN NumberOfPdpEntriesNeeded;
- UINTN IndexOfPml5Entries;
- UINTN IndexOfPml4Entries;
- UINTN IndexOfPdpEntries;
- UINTN IndexOfPageDirectoryEntries;
- UINT64 *PageMapLevel5Entry;
- UINT64 *PageMapLevel4Entry;
- UINT64 *PageMap;
- UINT64 *PageDirectoryPointerEntry;
- UINT64 *PageDirectory1GEntry;
- UINT64 *PageDirectoryEntry;
+ UINT64 PageAddress;
+ UINTN NumberOfPml5EntriesNeeded;
+ UINTN NumberOfPml4EntriesNeeded;
+ UINTN NumberOfPdpEntriesNeeded;
+ UINTN IndexOfPml5Entries;
+ UINTN IndexOfPml4Entries;
+ UINTN IndexOfPdpEntries;
+ UINTN IndexOfPageDirectoryEntries;
+ UINT64 *PageMapLevel5Entry;
+ UINT64 *PageMapLevel4Entry;
+ UINT64 *PageMap;
+ UINT64 *PageDirectoryPointerEntry;
+ UINT64 *PageDirectory1GEntry;
+ UINT64 *PageDirectoryEntry;
//
// IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
// when 5-Level Paging is disabled.
//
ASSERT (PhysicalAddressBits <= 52);
- if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
+ if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
PhysicalAddressBits = 48;
}
NumberOfPml5EntriesNeeded = 1;
if (PhysicalAddressBits > 48) {
- NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48);
- PhysicalAddressBits = 48;
+ NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
+ PhysicalAddressBits = 48;
}
NumberOfPml4EntriesNeeded = 1;
if (PhysicalAddressBits > 39) {
- NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39);
- PhysicalAddressBits = 39;
+ NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
+ PhysicalAddressBits = 39;
}
NumberOfPdpEntriesNeeded = 1;
ASSERT (PhysicalAddressBits > 30);
- NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30);
+ NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);
//
// By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
//
- PageMap = (VOID *) PageTable;
+ PageMap = (VOID *)PageTable;
PageMapLevel4Entry = PageMap;
PageMapLevel5Entry = NULL;
@@ -273,22 +282,24 @@ SetStaticPageTable (
//
PageMapLevel5Entry = PageMap;
}
- PageAddress = 0;
+
+ PageAddress = 0;
for ( IndexOfPml5Entries = 0
- ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
- ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
+ ; IndexOfPml5Entries++, PageMapLevel5Entry++)
+ {
//
// Each PML5 entry points to a page of PML4 entires.
// So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
// When 5-Level Paging is disabled, below allocation happens only once.
//
if (m5LevelPagingNeeded) {
- PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
+ PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
if (PageMapLevel4Entry == NULL) {
PageMapLevel4Entry = AllocatePageTableMemory (1);
- ASSERT(PageMapLevel4Entry != NULL);
- ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
+ ASSERT (PageMapLevel4Entry != NULL);
+ ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
*PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
}
@@ -298,11 +309,11 @@ SetStaticPageTable (
//
// Each PML4 entry points to a page of Page Directory Pointer entries.
//
- PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
+ PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
if (PageDirectoryPointerEntry == NULL) {
PageDirectoryPointerEntry = AllocatePageTableMemory (1);
- ASSERT(PageDirectoryPointerEntry != NULL);
- ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+ ASSERT (PageDirectoryPointerEntry != NULL);
+ ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
*PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
}
@@ -310,12 +321,13 @@ SetStaticPageTable (
if (m1GPageTableSupport) {
PageDirectory1GEntry = PageDirectoryPointerEntry;
for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
- if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
+ if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
//
// Skip the < 4G entries
//
continue;
}
+
//
// Fill in the Page Directory entries
//
@@ -324,21 +336,22 @@ SetStaticPageTable (
} else {
PageAddress = BASE_4GB;
for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
- if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
+ if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
//
// Skip the < 4G entries
//
continue;
}
+
//
// Each Directory Pointer entries points to a page of Page Directory entires.
// So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
//
- PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
+ PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
if (PageDirectoryEntry == NULL) {
PageDirectoryEntry = AllocatePageTableMemory (1);
- ASSERT(PageDirectoryEntry != NULL);
- ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
+ ASSERT (PageDirectoryEntry != NULL);
+ ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
//
// Fill in a Page Directory Pointer Entries
@@ -369,15 +382,15 @@ SmmInitPageTable (
VOID
)
{
- EFI_PHYSICAL_ADDRESS Pages;
- UINT64 *PTEntry;
- LIST_ENTRY *FreePage;
- UINTN Index;
- UINTN PageFaultHandlerHookAddress;
- IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
- EFI_STATUS Status;
- UINT64 *Pml4Entry;
- UINT64 *Pml5Entry;
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+ LIST_ENTRY *FreePage;
+ UINTN Index;
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+ EFI_STATUS Status;
+ UINT64 *Pml4Entry;
+ UINT64 *Pml5Entry;
//
// Initialize spin lock
@@ -401,7 +414,7 @@ SmmInitPageTable (
//
// Set IA32_PG_PMNT bit to mask this entry
//
- PTEntry = (UINT64*)(UINTN)Pages;
+ PTEntry = (UINT64 *)(UINTN)Pages;
for (Index = 0; Index < 4; Index++) {
PTEntry[Index] |= IA32_PG_PMNT;
}
@@ -409,7 +422,7 @@ SmmInitPageTable (
//
// Fill Page-Table-Level4 (PML4) entry
//
- Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
+ Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
ASSERT (Pml4Entry != NULL);
*Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
@@ -424,9 +437,9 @@ SmmInitPageTable (
//
// Fill PML5 entry
//
- Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
+ Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
ASSERT (Pml5Entry != NULL);
- *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
//
// Set sub-entries number
@@ -445,7 +458,7 @@ SmmInitPageTable (
//
// Add pages to page pool
//
- FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
+ FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
ASSERT (FreePage != NULL);
for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
InsertTailList (&mPagePool, FreePage);
@@ -455,20 +468,21 @@ SmmInitPageTable (
if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
HEAP_GUARD_NONSTOP_MODE ||
- NULL_DETECTION_NONSTOP_MODE) {
+ NULL_DETECTION_NONSTOP_MODE)
+ {
//
// Set own Page Fault entry instead of the default one, because SMM Profile
// feature depends on IRET instruction to do Single Step
//
PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
- IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
- IdtEntry += EXCEPT_IA32_PAGE_FAULT;
- IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
- IdtEntry->Bits.Reserved_0 = 0;
- IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
- IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
- IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
- IdtEntry->Bits.Reserved_1 = 0;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
+ IdtEntry->Bits.Reserved_1 = 0;
} else {
//
// Register Smm Page Fault Handler
@@ -509,8 +523,8 @@ SmmInitPageTable (
**/
VOID
SetAccNum (
- IN OUT UINT64 *Entry,
- IN UINT64 Acc
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
)
{
//
@@ -529,7 +543,7 @@ SetAccNum (
**/
UINT64
GetAccNum (
- IN UINT64 *Entry
+ IN UINT64 *Entry
)
{
//
@@ -548,10 +562,10 @@ GetAccNum (
**/
UINT64
GetAndUpdateAccNum (
- IN OUT UINT64 *Entry
+ IN OUT UINT64 *Entry
)
{
- UINT64 Acc;
+ UINT64 Acc;
Acc = GetAccNum (Entry);
if ((*Entry & IA32_PG_A) != 0) {
@@ -570,6 +584,7 @@ GetAndUpdateAccNum (
SetAccNum (Entry, Acc - 1);
}
}
+
return Acc;
}
@@ -587,84 +602,86 @@ ReclaimPages (
VOID
)
{
- UINT64 Pml5Entry;
- UINT64 *Pml5;
- UINT64 *Pml4;
- UINT64 *Pdpt;
- UINT64 *Pdt;
- UINTN Pml5Index;
- UINTN Pml4Index;
- UINTN PdptIndex;
- UINTN PdtIndex;
- UINTN MinPml5;
- UINTN MinPml4;
- UINTN MinPdpt;
- UINTN MinPdt;
- UINT64 MinAcc;
- UINT64 Acc;
- UINT64 SubEntriesNum;
- BOOLEAN PML4EIgnore;
- BOOLEAN PDPTEIgnore;
- UINT64 *ReleasePageAddress;
- IA32_CR4 Cr4;
- BOOLEAN Enable5LevelPaging;
- UINT64 PFAddress;
- UINT64 PFAddressPml5Index;
- UINT64 PFAddressPml4Index;
- UINT64 PFAddressPdptIndex;
- UINT64 PFAddressPdtIndex;
-
- Pml4 = NULL;
- Pdpt = NULL;
- Pdt = NULL;
- MinAcc = (UINT64)-1;
- MinPml4 = (UINTN)-1;
- MinPml5 = (UINTN)-1;
- MinPdpt = (UINTN)-1;
- MinPdt = (UINTN)-1;
- Acc = 0;
+ UINT64 Pml5Entry;
+ UINT64 *Pml5;
+ UINT64 *Pml4;
+ UINT64 *Pdpt;
+ UINT64 *Pdt;
+ UINTN Pml5Index;
+ UINTN Pml4Index;
+ UINTN PdptIndex;
+ UINTN PdtIndex;
+ UINTN MinPml5;
+ UINTN MinPml4;
+ UINTN MinPdpt;
+ UINTN MinPdt;
+ UINT64 MinAcc;
+ UINT64 Acc;
+ UINT64 SubEntriesNum;
+ BOOLEAN PML4EIgnore;
+ BOOLEAN PDPTEIgnore;
+ UINT64 *ReleasePageAddress;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+ UINT64 PFAddress;
+ UINT64 PFAddressPml5Index;
+ UINT64 PFAddressPml4Index;
+ UINT64 PFAddressPdptIndex;
+ UINT64 PFAddressPdtIndex;
+
+ Pml4 = NULL;
+ Pdpt = NULL;
+ Pdt = NULL;
+ MinAcc = (UINT64)-1;
+ MinPml4 = (UINTN)-1;
+ MinPml5 = (UINTN)-1;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ Acc = 0;
ReleasePageAddress = 0;
- PFAddress = AsmReadCr2 ();
+ PFAddress = AsmReadCr2 ();
PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
- PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
+ PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
- Cr4.UintN = AsmReadCr4 ();
- Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
- Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
+ Pml5 = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);
if (!Enable5LevelPaging) {
//
// Create one fake PML5 entry for 4-Level Paging
// so that the page table parsing logic only handles 5-Level page structure.
//
- Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
- Pml5 = &Pml5Entry;
+ Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
+ Pml5 = &Pml5Entry;
}
//
// First, find the leaf entry has the smallest access record value
//
for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
- if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
+ if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
//
// If the PML5 entry is not present or is masked, skip it
//
continue;
}
- Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
+
+ Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
- if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
+ if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
//
// If the PML4 entry is not present or is masked, skip it
//
continue;
}
- Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
+
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
PML4EIgnore = FALSE;
for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
- if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
//
// If the PDPT entry is not present or is masked, skip it
//
@@ -674,18 +691,20 @@ ReclaimPages (
//
PML4EIgnore = TRUE;
}
+
continue;
}
+
if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
//
// It's not 1-GByte pages entry, it should be a PDPT entry,
// we will not check PML4 entry more
//
PML4EIgnore = TRUE;
- Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
+ Pdt = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
PDPTEIgnore = FALSE;
- for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
- if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
+ if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
//
// If the PD entry is not present or is masked, skip it
//
@@ -695,79 +714,86 @@ ReclaimPages (
//
PDPTEIgnore = TRUE;
}
+
continue;
}
+
if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
//
// It's not 2 MByte page table entry, it should be PD entry
// we will find the entry has the smallest access record value
//
PDPTEIgnore = TRUE;
- if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
- Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
+ (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
+ {
Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
if (Acc < MinAcc) {
//
// If the PD entry has the smallest access record value,
// save the Page address to be released
//
- MinAcc = Acc;
- MinPml5 = Pml5Index;
- MinPml4 = Pml4Index;
- MinPdpt = PdptIndex;
- MinPdt = PdtIndex;
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = PdtIndex;
ReleasePageAddress = Pdt + PdtIndex;
}
}
}
}
+
if (!PDPTEIgnore) {
//
// If this PDPT entry has no PDT entries pointer to 4 KByte pages,
// it should only has the entries point to 2 MByte Pages
//
- if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
- Pml5Index != PFAddressPml5Index) {
+ if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
+ (Pml5Index != PFAddressPml5Index))
+ {
Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
if (Acc < MinAcc) {
//
// If the PDPT entry has the smallest access record value,
// save the Page address to be released
//
- MinAcc = Acc;
- MinPml5 = Pml5Index;
- MinPml4 = Pml4Index;
- MinPdpt = PdptIndex;
- MinPdt = (UINTN)-1;
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = (UINTN)-1;
ReleasePageAddress = Pdpt + PdptIndex;
}
}
}
}
}
+
if (!PML4EIgnore) {
//
// If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
// it should only has the entries point to 1 GByte Pages
//
- if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
if (Acc < MinAcc) {
//
// If the PML4 entry has the smallest access record value,
// save the Page address to be released
//
- MinAcc = Acc;
- MinPml5 = Pml5Index;
- MinPml4 = Pml4Index;
- MinPdpt = (UINTN)-1;
- MinPdt = (UINTN)-1;
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
ReleasePageAddress = Pml4 + Pml4Index;
}
}
}
}
}
+
//
// Make sure one PML4/PDPT/PD entry is selected
//
@@ -776,7 +802,7 @@ ReclaimPages (
//
// Secondly, insert the page pointed by this entry into page pool and clear this entry
//
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
*ReleasePageAddress = 0;
//
@@ -788,16 +814,17 @@ ReclaimPages (
//
// If 4 KByte Page Table is released, check the PDPT entry
//
- Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
- Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
- SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
- if (SubEntriesNum == 0 &&
- (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ Pml4 = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
+ Pdpt = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
+ SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
+ if ((SubEntriesNum == 0) &&
+ ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
+ {
//
// Release the empty Page Directory table if there was no more 4 KByte Page Table entry
// clear the Page directory entry
//
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
Pdpt[MinPdpt] = 0;
//
// Go on checking the PML4 table
@@ -805,33 +832,37 @@ ReclaimPages (
MinPdt = (UINTN)-1;
continue;
}
+
//
// Update the sub-entries filed in PDPT entry and exit
//
SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
break;
}
+
if (MinPdpt != (UINTN)-1) {
//
// One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
//
SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
- if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
//
// Release the empty PML4 table if there was no more 1G KByte Page Table entry
// clear the Page directory entry
//
- InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
+ InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
Pml4[MinPml4] = 0;
- MinPdpt = (UINTN)-1;
+ MinPdpt = (UINTN)-1;
continue;
}
+
//
// Update the sub-entries filed in PML4 entry and exit
//
SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
break;
}
+
//
// PLM4 table has been released before, exit it
//
@@ -850,7 +881,7 @@ AllocPage (
VOID
)
{
- UINT64 RetVal;
+ UINT64 RetVal;
if (IsListEmpty (&mPagePool)) {
//
@@ -867,7 +898,7 @@ AllocPage (
//
// Clean this page and return
//
- ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
+ ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
return RetVal;
}
@@ -880,78 +911,81 @@ SmiDefaultPFHandler (
VOID
)
{
- UINT64 *PageTable;
- UINT64 *PageTableTop;
- UINT64 PFAddress;
- UINTN StartBit;
- UINTN EndBit;
- UINT64 PTIndex;
- UINTN Index;
- SMM_PAGE_SIZE_TYPE PageSize;
- UINTN NumOfPages;
- UINTN PageAttribute;
- EFI_STATUS Status;
- UINT64 *UpperEntry;
- BOOLEAN Enable5LevelPaging;
- IA32_CR4 Cr4;
+ UINT64 *PageTable;
+ UINT64 *PageTableTop;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+ BOOLEAN Enable5LevelPaging;
+ IA32_CR4 Cr4;
//
// Set default SMM page attribute
//
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
PageAttribute = 0;
- EndBit = 0;
- PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
- PFAddress = AsmReadCr2 ();
+ EndBit = 0;
+ PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
- Cr4.UintN = AsmReadCr4 ();
- Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
//
// If platform not support page table attribute, set default SMM page attribute
//
if (Status != EFI_SUCCESS) {
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
PageAttribute = 0;
}
+
if (PageSize >= MaxSmmPageSizeType) {
PageSize = SmmPageSize2M;
}
+
if (NumOfPages > 512) {
NumOfPages = 512;
}
switch (PageSize) {
- case SmmPageSize4K:
- //
- // BIT12 to BIT20 is Page Table index
- //
- EndBit = 12;
- break;
- case SmmPageSize2M:
- //
- // BIT21 to BIT29 is Page Directory index
- //
- EndBit = 21;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- case SmmPageSize1G:
- if (!m1GPageTableSupport) {
- DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
+ ASSERT (FALSE);
+ }
+
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ default:
ASSERT (FALSE);
- }
- //
- // BIT30 to BIT38 is Page Directory Pointer Table index
- //
- EndBit = 30;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- default:
- ASSERT (FALSE);
}
//
@@ -977,13 +1011,14 @@ SmiDefaultPFHandler (
//
UpperEntry = PageTable + PTIndex;
}
+
//
// BIT9 to BIT11 of entry is used to save access record,
// initialize value is 7
//
PageTable[PTIndex] |= (UINT64)IA32_PG_A;
SetAccNum (PageTable + PTIndex, 7);
- PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
}
PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
@@ -996,6 +1031,7 @@ SmiDefaultPFHandler (
DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
ASSERT (FALSE);
}
+
//
// Fill the new entry
//
@@ -1004,6 +1040,7 @@ SmiDefaultPFHandler (
if (UpperEntry != NULL) {
SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
}
+
//
// Get the next page address if we need to create more page tables
//
@@ -1022,14 +1059,14 @@ SmiDefaultPFHandler (
VOID
EFIAPI
SmiPFHandler (
- IN EFI_EXCEPTION_TYPE InterruptType,
- IN EFI_SYSTEM_CONTEXT SystemContext
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
)
{
- UINTN PFAddress;
- UINTN GuardPageAddress;
- UINTN ShadowStackGuardPageAddress;
- UINTN CpuIndex;
+ UINTN PFAddress;
+ UINTN GuardPageAddress;
+ UINTN ShadowStackGuardPageAddress;
+ UINTN CpuIndex;
ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
@@ -1049,31 +1086,34 @@ SmiPFHandler (
// or SMM page protection violation.
//
if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
- (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
+ {
DumpCpuContext (InterruptType, SystemContext);
- CpuIndex = GetCpuIndex ();
- GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
+ CpuIndex = GetCpuIndex ();
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= GuardPageAddress) &&
- (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
+ {
DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
} else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
- (mSmmShadowStackSize > 0) &&
- (PFAddress >= ShadowStackGuardPageAddress) &&
- (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
+ (mSmmShadowStackSize > 0) &&
+ (PFAddress >= ShadowStackGuardPageAddress) &&
+ (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
+ {
DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
} else {
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
- );
+ );
} else {
DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
- );
+ );
}
if (HEAP_GUARD_NONSTOP_MODE) {
@@ -1081,6 +1121,7 @@ SmiPFHandler (
goto Exit;
}
}
+
CpuDeadLoop ();
goto Exit;
}
@@ -1089,13 +1130,14 @@ SmiPFHandler (
// If a page fault occurs in non-SMRAM range.
//
if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
- (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
+ {
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
- );
+ );
CpuDeadLoop ();
goto Exit;
}
@@ -1103,13 +1145,14 @@ SmiPFHandler (
//
// If NULL pointer was just accessed
//
- if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
- (PFAddress < EFI_PAGE_SIZE)) {
+ if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
+ (PFAddress < EFI_PAGE_SIZE))
+ {
DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
DEBUG_CODE (
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
- );
+ );
if (NULL_DETECTION_NONSTOP_MODE) {
GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
@@ -1125,7 +1168,7 @@ SmiPFHandler (
DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
- );
+ );
CpuDeadLoop ();
goto Exit;
}
@@ -1152,20 +1195,20 @@ SetPageTableAttributes (
VOID
)
{
- UINTN Index2;
- UINTN Index3;
- UINTN Index4;
- UINTN Index5;
- UINT64 *L1PageTable;
- UINT64 *L2PageTable;
- UINT64 *L3PageTable;
- UINT64 *L4PageTable;
- UINT64 *L5PageTable;
- UINTN PageTableBase;
- BOOLEAN IsSplitted;
- BOOLEAN PageTableSplitted;
- BOOLEAN CetEnabled;
- BOOLEAN Enable5LevelPaging;
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINTN Index5;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+ UINT64 *L5PageTable;
+ UINTN PageTableBase;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
+ BOOLEAN Enable5LevelPaging;
//
// Don't mark page table memory as read-only if
@@ -1177,18 +1220,21 @@ SetPageTableAttributes (
//
if (!mCpuSmmRestrictedMemoryAccess ||
((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
- FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ FeaturePcdGet (PcdCpuSmmProfileEnable))
+ {
//
// Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
//
- ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
- (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));
+ ASSERT (
+ !(mCpuSmmRestrictedMemoryAccess &&
+ (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0)
+ );
//
// Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
//
ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
- return ;
+ return;
}
DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
@@ -1197,19 +1243,20 @@ SetPageTableAttributes (
// Disable write protection, because we need mark page table to be write protected.
// We need *write* page table memory, to mark itself to be *read only*.
//
- CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
if (CetEnabled) {
//
// CET must be disabled if WP is disabled.
//
- DisableCet();
+ DisableCet ();
}
- AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
do {
DEBUG ((DEBUG_INFO, "Start...\n"));
PageTableSplitted = FALSE;
- L5PageTable = NULL;
+ L5PageTable = NULL;
GetPageTable (&PageTableBase, &Enable5LevelPaging);
@@ -1219,7 +1266,7 @@ SetPageTableAttributes (
PageTableSplitted = (PageTableSplitted || IsSplitted);
}
- for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
+ for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof (UINT64) : 1); Index5++) {
if (Enable5LevelPaging) {
L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
if (L4PageTable == NULL) {
@@ -1228,10 +1275,11 @@ SetPageTableAttributes (
} else {
L4PageTable = (UINT64 *)PageTableBase;
}
+
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
- for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof (UINT64); Index4++) {
L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
if (L3PageTable == NULL) {
continue;
@@ -1240,11 +1288,12 @@ SetPageTableAttributes (
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
- for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof (UINT64); Index3++) {
if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
// 1G
continue;
}
+
L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
if (L2PageTable == NULL) {
continue;
@@ -1253,15 +1302,17 @@ SetPageTableAttributes (
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
- for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof (UINT64); Index2++) {
if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
// 2M
continue;
}
+
L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
if (L1PageTable == NULL) {
continue;
}
+
SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
PageTableSplitted = (PageTableSplitted || IsSplitted);
}
@@ -1273,15 +1324,15 @@ SetPageTableAttributes (
//
// Enable write protection, after page table updated.
//
- AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
if (CetEnabled) {
//
// re-enable CET.
//
- EnableCet();
+ EnableCet ();
}
- return ;
+ return;
}
/**