From 4e78c7bebb2c30cab0afa193a4eeb0ef05ca9a12 Mon Sep 17 00:00:00 2001
From: Ray Ni
Date: Fri, 12 Jul 2019 14:59:32 +0800
Subject: Revert "UefiCpuPkg/PiSmmCpu: Enable 5 level paging when CPU supports"

This reverts commit 7365eb2c8cf1d7112330d09918c0c67e8d0b827a.

Commit 7c5010c7f8 "MdePkg/BaseLib.h: Update IA32_CR4 structure for
5-level paging" technically breaks the EDKII development process
documented in
https://github.com/tianocore/tianocore.github.io/wiki/EDK-II-Development-Process
and in Maintainers.txt in the EDKII repo root directory.

The violation is that commit 7c5010c7f8 does not have a Reviewed-by or
Acked-by from the MdePkg maintainers.

In order to revert 7c5010c7f8, 7365eb2c8 needs to be reverted first;
otherwise, simply reverting 7c5010c7f8 will cause a build break.

Signed-off-by: Ray Ni
---
 UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c |  20 +-
 UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c             | 272 +++++-------
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 485 ++++++++-------------
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm        |  12 -
 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c     |  72 ++-
 5 files changed, 300 insertions(+), 561 deletions(-)

(limited to 'UefiCpuPkg')

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index 55090e9c3e..069be3aaa5 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -125,36 +125,18 @@ GetPageTableEntry (
   UINTN Index2;
   UINTN Index3;
   UINTN Index4;
-  UINTN Index5;
   UINT64 *L1PageTable;
   UINT64 *L2PageTable;
   UINT64 *L3PageTable;
   UINT64 *L4PageTable;
-  UINT64 *L5PageTable;
-  IA32_CR4 Cr4;
-  BOOLEAN Enable5LevelPaging;
 
-  Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_PAE_INDEX_MASK;
   Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
   Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
   Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
   Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
 
-  Cr4.UintN = AsmReadCr4 ();
-  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
-
   if (sizeof(UINTN) == sizeof(UINT64)) {
-    if (Enable5LevelPaging) {
-      L5PageTable = (UINT64 *)GetPageTableBase ();
-      if (L5PageTable[Index5] == 0) {
-        *PageAttribute = PageNone;
-        return NULL;
-      }
-
-      L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
-    } else {
-      L4PageTable = (UINT64 *)GetPageTableBase ();
-    }
+    L4PageTable = (UINT64 *)GetPageTableBase ();
     if (L4PageTable[Index4] == 0) {
       *PageAttribute = PageNone;
       return NULL;
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
index c5131526f0..e2b6a2d9b2 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -534,78 +534,43 @@ InitPaging (
   VOID
   )
 {
-  UINT64 Pml5Entry;
-  UINT64 Pml4Entry;
-  UINT64 *Pml5;
   UINT64 *Pml4;
   UINT64 *Pdpt;
   UINT64 *Pd;
   UINT64 *Pt;
   UINTN Address;
-  UINTN Pml5Index;
   UINTN Pml4Index;
   UINTN PdptIndex;
   UINTN PdIndex;
   UINTN PtIndex;
   UINTN NumberOfPdptEntries;
   UINTN NumberOfPml4Entries;
-  UINTN NumberOfPml5Entries;
   UINTN SizeOfMemorySpace;
   BOOLEAN Nx;
-  IA32_CR4 Cr4;
-  BOOLEAN Enable5LevelPaging;
-
-  Cr4.UintN = AsmReadCr4 ();
-  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
 
   if (sizeof (UINTN) == sizeof (UINT64)) {
-    if (!Enable5LevelPaging) {
-      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
-      Pml5 = &Pml5Entry;
-    } else {
-      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
-    }
+    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
     SizeOfMemorySpace = HighBitSet64
(gPhyMask) + 1; // // Calculate the table entries of PML4E and PDPTE. // - NumberOfPml5Entries = 1; - if (SizeOfMemorySpace > 48) { - NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48); - SizeOfMemorySpace = 48; - } - - NumberOfPml4Entries = 1; - if (SizeOfMemorySpace > 39) { - NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39); - SizeOfMemorySpace = 39; + if (SizeOfMemorySpace <= 39 ) { + NumberOfPml4Entries = 1; + NumberOfPdptEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30)); + } else { + NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39)); + NumberOfPdptEntries = 512; } - - NumberOfPdptEntries = 1; - ASSERT (SizeOfMemorySpace > 30); - NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30); } else { - Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P; - Pml4 = &Pml4Entry; - Pml5Entry = (UINTN) Pml4 | IA32_PG_P; - Pml5 = &Pml5Entry; - NumberOfPml5Entries = 1; - NumberOfPml4Entries = 1; + NumberOfPml4Entries = 1; NumberOfPdptEntries = 4; } // // Go through page table and change 2MB-page into 4KB-page. // - for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) { - if ((Pml5[Pml5Index] & IA32_PG_P) == 0) { - // - // If PML5 entry does not exist, skip it - // - continue; - } - Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK); - for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) { + for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) { + if (sizeof (UINTN) == sizeof (UINT64)) { if ((Pml4[Pml4Index] & IA32_PG_P) == 0) { // // If PML4 entry does not exist, skip it @@ -613,76 +578,63 @@ InitPaging ( continue; } Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) { - if ((*Pdpt & IA32_PG_P) == 0) { - // - // If PDPT entry does not exist, skip it - // - continue; - } - if ((*Pdpt & IA32_PG_PS) != 0) { + } else { + Pdpt = (UINT64*)(UINTN)mSmmProfileCr3; + } + for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) { + if ((*Pdpt & IA32_PG_P) == 0) { + // + // If PDPT entry does not exist, skip it + // + continue; + } + if ((*Pdpt & IA32_PG_PS) != 0) { + // + // This is 1G entry, skip it + // + continue; + } + Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); + if (Pd == 0) { + continue; + } + for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) { + if ((*Pd & IA32_PG_P) == 0) { // - // This is 1G entry, skip it + // If PD entry does not exist, skip it // continue; } - Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - if (Pd == 0) { - continue; - } - for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) { - if ((*Pd & IA32_PG_P) == 0) { - // - // If PD entry does not exist, skip it - // - continue; - } - Address = (UINTN) LShiftU64 ( - LShiftU64 ( - LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex, - 9 - ) + PdIndex, - 21 - ); + Address = (((PdptIndex << 9) + PdIndex) << 21); + // + // If it is 2M page, check IsAddressSplit() + // + if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) { // - // If it is 2M page, check IsAddressSplit() + // Based on current page table, create 4KB page table for split area. // - if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) { - // - // Based on current page table, create 4KB page table for split area. 
- // - ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK)); - - Pt = AllocatePageTableMemory (1); - ASSERT (Pt != NULL); + ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK)); - *Pd = (UINTN) Pt | IA32_PG_RW | IA32_PG_P; + Pt = AllocatePageTableMemory (1); + ASSERT (Pt != NULL); - // Split it - for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) { - *Pt = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS); - } // end for PT - *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - } // end if IsAddressSplit - } // end for PD - } // end for PDPT - } // end for PML4 - } // end for PML5 + // Split it + for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) { + Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS); + } // end for PT + *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS; + } // end if IsAddressSplit + } // end for PD + } // end for PDPT + } // end for PML4 // // Go through page table and set several page table entries to absent or execute-disable. // DEBUG ((EFI_D_INFO, "Patch page table start ...\n")); - for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) { - if ((Pml5[Pml5Index] & IA32_PG_P) == 0) { - // - // If PML5 entry does not exist, skip it - // - continue; - } - Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK); - for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) { + for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) { + if (sizeof (UINTN) == sizeof (UINT64)) { if ((Pml4[Pml4Index] & IA32_PG_P) == 0) { // // If PML4 entry does not exist, skip it @@ -690,73 +642,69 @@ InitPaging ( continue; } Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) { - if ((*Pdpt & IA32_PG_P) == 0) { - // - // If PDPT entry does not exist, skip it - // - continue; + } else { + Pdpt = (UINT64*)(UINTN)mSmmProfileCr3; + } + for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) { + if ((*Pdpt & IA32_PG_P) == 0) { + // + // If PDPT entry does not exist, skip it + // + continue; + } + if ((*Pdpt & IA32_PG_PS) != 0) { + // + // This is 1G entry, set NX bit and skip it + // + if (mXdSupported) { + *Pdpt = *Pdpt | IA32_PG_NX; } - if ((*Pdpt & IA32_PG_PS) != 0) { + continue; + } + Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); + if (Pd == 0) { + continue; + } + for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) { + if ((*Pd & IA32_PG_P) == 0) { // - // This is 1G entry, set NX bit and skip it + // If PD entry does not exist, skip it // - if (mXdSupported) { - *Pdpt = *Pdpt | IA32_PG_NX; - } continue; } - Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - if (Pd == 0) { - continue; - } - for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) { - if ((*Pd & IA32_PG_P) == 0) { + Address = (((PdptIndex << 9) + PdIndex) << 21); + + if ((*Pd & IA32_PG_PS) != 0) { + // 2MB page + + if (!IsAddressValid (Address, &Nx)) { // - // If PD entry does not exist, skip it + // Patch to remove Present flag and RW flag // + *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS); + } + if (Nx && mXdSupported) { + *Pd = *Pd | IA32_PG_NX; + } + } else { + // 4KB page + Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); + if (Pt == 0) { continue; } - Address = (UINTN) LShiftU64 ( - LShiftU64 ( - LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + 
PdptIndex, - 9 - ) + PdIndex, - 21 - ); - - if ((*Pd & IA32_PG_PS) != 0) { - // 2MB page - + for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) { if (!IsAddressValid (Address, &Nx)) { - // - // Patch to remove Present flag and RW flag - // - *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS); + *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS); } if (Nx && mXdSupported) { - *Pd = *Pd | IA32_PG_NX; - } - } else { - // 4KB page - Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - if (Pt == 0) { - continue; + *Pt = *Pt | IA32_PG_NX; } - for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) { - if (!IsAddressValid (Address, &Nx)) { - *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS); - } - if (Nx && mXdSupported) { - *Pt = *Pt | IA32_PG_NX; - } - Address += SIZE_4KB; - } // end for PT - } // end if PS - } // end for PD - } // end for PDPT - } // end for PML4 - } // end for PML5 + Address += SIZE_4KB; + } // end for PT + } // end if PS + } // end for PD + } // end for PDPT + } // end for PML4 // // Flush TLB @@ -1208,20 +1156,6 @@ RestorePageTableBelow4G ( { UINTN PTIndex; UINTN PFIndex; - IA32_CR4 Cr4; - BOOLEAN Enable5LevelPaging; - - Cr4.UintN = AsmReadCr4 (); - Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1); - - // - // PML5 - // - if (Enable5LevelPaging) { - PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56); - ASSERT (PageTable[PTIndex] != 0); - PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK); - } // // PML4 diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c index c31160735a..3d5d663d99 100644 --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c @@ -16,8 +16,6 @@ SPDX-License-Identifier: BSD-2-Clause-Patent LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool); BOOLEAN m1GPageTableSupport = FALSE; BOOLEAN mCpuSmmStaticPageTable; -BOOLEAN m5LevelPagingSupport; -X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport; /** Disable CET. @@ -62,31 +60,6 @@ Is1GPageSupport ( return FALSE; } -/** - Check if 5-level paging is supported by processor or not. - - @retval TRUE 5-level paging is supported. - @retval FALSE 5-level paging is not supported. - -**/ -BOOLEAN -Is5LevelPagingSupport ( - VOID - ) -{ - CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags; - - AsmCpuidEx ( - CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, - CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, - NULL, - NULL, - &EcxFlags.Uint32, - NULL - ); - return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0); -} - /** Set sub-entries number in entry. @@ -157,6 +130,14 @@ CalculateMaximumSupportAddress ( PhysicalAddressBits = 36; } } + + // + // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses. + // + ASSERT (PhysicalAddressBits <= 52); + if (PhysicalAddressBits > 48) { + PhysicalAddressBits = 48; + } return PhysicalAddressBits; } @@ -171,137 +152,89 @@ SetStaticPageTable ( ) { UINT64 PageAddress; - UINTN NumberOfPml5EntriesNeeded; UINTN NumberOfPml4EntriesNeeded; UINTN NumberOfPdpEntriesNeeded; - UINTN IndexOfPml5Entries; UINTN IndexOfPml4Entries; UINTN IndexOfPdpEntries; UINTN IndexOfPageDirectoryEntries; - UINT64 *PageMapLevel5Entry; UINT64 *PageMapLevel4Entry; UINT64 *PageMap; UINT64 *PageDirectoryPointerEntry; UINT64 *PageDirectory1GEntry; UINT64 *PageDirectoryEntry; - // - // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses - // when 5-Level Paging is disabled. 
- // - ASSERT (mPhysicalAddressBits <= 52); - if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) { - mPhysicalAddressBits = 48; - } - - NumberOfPml5EntriesNeeded = 1; - if (mPhysicalAddressBits > 48) { - NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48); - mPhysicalAddressBits = 48; - } - - NumberOfPml4EntriesNeeded = 1; - if (mPhysicalAddressBits > 39) { - NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39); - mPhysicalAddressBits = 39; + if (mPhysicalAddressBits <= 39 ) { + NumberOfPml4EntriesNeeded = 1; + NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30)); + } else { + NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39)); + NumberOfPdpEntriesNeeded = 512; } - NumberOfPdpEntriesNeeded = 1; - ASSERT (mPhysicalAddressBits > 30); - NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30); - // // By architecture only one PageMapLevel4 exists - so lets allocate storage for it. // PageMap = (VOID *) PageTable; PageMapLevel4Entry = PageMap; - PageMapLevel5Entry = NULL; - if (m5LevelPagingSupport) { - // - // By architecture only one PageMapLevel5 exists - so lets allocate storage for it. - // - PageMapLevel5Entry = PageMap; - } PageAddress = 0; - - for ( IndexOfPml5Entries = 0 - ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded - ; IndexOfPml5Entries++, PageMapLevel5Entry++) { + for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) { // - // Each PML5 entry points to a page of PML4 entires. - // So lets allocate space for them and fill them in in the IndexOfPml4Entries loop. - // When 5-Level Paging is disabled, below allocation happens only once. + // Each PML4 entry points to a page of Page Directory Pointer entries. // - if (m5LevelPagingSupport) { - PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask); - if (PageMapLevel4Entry == NULL) { - PageMapLevel4Entry = AllocatePageTableMemory (1); - ASSERT(PageMapLevel4Entry != NULL); - ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1)); - - *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - } - } - - for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) { - // - // Each PML4 entry points to a page of Page Directory Pointer entries. 
- // - PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask); - if (PageDirectoryPointerEntry == NULL) { - PageDirectoryPointerEntry = AllocatePageTableMemory (1); - ASSERT(PageDirectoryPointerEntry != NULL); - ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1)); + PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask); + if (PageDirectoryPointerEntry == NULL) { + PageDirectoryPointerEntry = AllocatePageTableMemory (1); + ASSERT(PageDirectoryPointerEntry != NULL); + ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1)); - *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - } + *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; + } - if (m1GPageTableSupport) { - PageDirectory1GEntry = PageDirectoryPointerEntry; - for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) { - if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) { - // - // Skip the < 4G entries - // - continue; - } + if (m1GPageTableSupport) { + PageDirectory1GEntry = PageDirectoryPointerEntry; + for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) { + if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) { // - // Fill in the Page Directory entries + // Skip the < 4G entries // - *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS; + continue; } - } else { - PageAddress = BASE_4GB; - for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) { - if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) { - // - // Skip the < 4G entries - // - continue; - } + // + // Fill in the Page Directory entries + // + *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS; + } + } else { + PageAddress = BASE_4GB; + for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) { + if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) { // - // Each Directory Pointer entries points to a page of Page Directory entires. - // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop. + // Skip the < 4G entries // - PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask); - if (PageDirectoryEntry == NULL) { - PageDirectoryEntry = AllocatePageTableMemory (1); - ASSERT(PageDirectoryEntry != NULL); - ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1)); + continue; + } + // + // Each Directory Pointer entries points to a page of Page Directory entires. + // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop. 
+ // + PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask); + if (PageDirectoryEntry == NULL) { + PageDirectoryEntry = AllocatePageTableMemory (1); + ASSERT(PageDirectoryEntry != NULL); + ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1)); - // - // Fill in a Page Directory Pointer Entries - // - *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - } + // + // Fill in a Page Directory Pointer Entries + // + *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; + } - for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) { - // - // Fill in the Page Directory entries - // - *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS; - } + for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) { + // + // Fill in the Page Directory entries + // + *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS; } } } @@ -326,8 +259,6 @@ SmmInitPageTable ( UINTN PageFaultHandlerHookAddress; IA32_IDT_GATE_DESCRIPTOR *IdtEntry; EFI_STATUS Status; - UINT64 *Pml4Entry; - UINT64 *Pml5Entry; // // Initialize spin lock @@ -335,14 +266,12 @@ SmmInitPageTable ( InitializeSpinLock (mPFLock); mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable); - m1GPageTableSupport = Is1GPageSupport (); - m5LevelPagingSupport = Is5LevelPagingSupport (); - mPhysicalAddressBits = CalculateMaximumSupportAddress (); - PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1); - DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport)); - DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport)); - DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - %d\n", mCpuSmmStaticPageTable)); - DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits)); + m1GPageTableSupport = Is1GPageSupport (); + DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport)); + DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable)); + + mPhysicalAddressBits = CalculateMaximumSupportAddress (); + DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits)); // // Generate PAE page table for the first 4GB memory space // @@ -359,30 +288,15 @@ SmmInitPageTable ( // // Fill Page-Table-Level4 (PML4) entry // - Pml4Entry = (UINT64*)AllocatePageTableMemory (1); - ASSERT (Pml4Entry != NULL); - *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry)); + PTEntry = (UINT64*)AllocatePageTableMemory (1); + ASSERT (PTEntry != NULL); + *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS; + ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry)); // // Set sub-entries number // - SetSubEntriesNum (Pml4Entry, 3); - PTEntry = Pml4Entry; - - if (m5LevelPagingSupport) { - // - // Fill PML5 entry - // - Pml5Entry = (UINT64*)AllocatePageTableMemory (1); - *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS; - ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry)); - // - // Set sub-entries number - // - SetSubEntriesNum (Pml5Entry, 1); - PTEntry = Pml5Entry; - } + SetSubEntriesNum (PTEntry, 3); if (mCpuSmmStaticPageTable) { SetStaticPageTable ((UINTN)PTEntry); @@ -430,7 +344,7 @@ 
SmmInitPageTable ( } // - // Return the address of PML4/PML5 (to set CR3) + // Return the address of PML4 (to set CR3) // return (UINT32)(UINTN)PTEntry; } @@ -522,16 +436,12 @@ ReclaimPages ( VOID ) { - UINT64 Pml5Entry; - UINT64 *Pml5; UINT64 *Pml4; UINT64 *Pdpt; UINT64 *Pdt; - UINTN Pml5Index; UINTN Pml4Index; UINTN PdptIndex; UINTN PdtIndex; - UINTN MinPml5; UINTN MinPml4; UINTN MinPdpt; UINTN MinPdt; @@ -541,149 +451,122 @@ ReclaimPages ( BOOLEAN PML4EIgnore; BOOLEAN PDPTEIgnore; UINT64 *ReleasePageAddress; - IA32_CR4 Cr4; - BOOLEAN Enable5LevelPaging; Pml4 = NULL; Pdpt = NULL; Pdt = NULL; MinAcc = (UINT64)-1; MinPml4 = (UINTN)-1; - MinPml5 = (UINTN)-1; MinPdpt = (UINTN)-1; MinPdt = (UINTN)-1; Acc = 0; ReleasePageAddress = 0; - Cr4.UintN = AsmReadCr4 (); - Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1); - Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask); - - if (!Enable5LevelPaging) { - // - // Create one fake PML5 entry for 4-Level Paging - // so that the page table parsing logic only handles 5-Level page structure. - // - Pml5Entry = (UINTN) Pml5 | IA32_PG_P; - Pml5 = &Pml5Entry; - } - // // First, find the leaf entry has the smallest access record value // - for (Pml5Index = 0; Pml5Index < Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1; Pml5Index++) { - if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) { + Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask); + for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) { + if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) { // - // If the PML5 entry is not present or is masked, skip it + // If the PML4 entry is not present or is masked, skip it // continue; } - Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask); - for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) { - if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) { + Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask); + PML4EIgnore = FALSE; + for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) { + if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) { // - // If the PML4 entry is not present or is masked, skip it + // If the PDPT entry is not present or is masked, skip it // - continue; - } - Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask); - PML4EIgnore = FALSE; - for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) { - if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) { + if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) { // - // If the PDPT entry is not present or is masked, skip it + // If the PDPT entry is masked, we will ignore checking the PML4 entry // - if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) { + PML4EIgnore = TRUE; + } + continue; + } + if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) { + // + // It's not 1-GByte pages entry, it should be a PDPT entry, + // we will not check PML4 entry more + // + PML4EIgnore = TRUE; + Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask); + PDPTEIgnore = FALSE; + for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) { + if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) { // - // If the PDPT entry is masked, we will ignore checking the PML4 entry + // If the PD entry is not present or is masked, skip it // - PML4EIgnore = TRUE; - } - continue; - } - if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) 
{ - // - // It's not 1-GByte pages entry, it should be a PDPT entry, - // we will not check PML4 entry more - // - PML4EIgnore = TRUE; - Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask); - PDPTEIgnore = FALSE; - for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) { - if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) { - // - // If the PD entry is not present or is masked, skip it - // - if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) { - // - // If the PD entry is masked, we will not PDPT entry more - // - PDPTEIgnore = TRUE; - } - continue; - } - if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) { + if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) { // - // It's not 2 MByte page table entry, it should be PD entry - // we will find the entry has the smallest access record value + // If the PD entry is masked, we will not PDPT entry more // PDPTEIgnore = TRUE; - Acc = GetAndUpdateAccNum (Pdt + PdtIndex); - if (Acc < MinAcc) { - // - // If the PD entry has the smallest access record value, - // save the Page address to be released - // - MinAcc = Acc; - MinPml5 = Pml5Index; - MinPml4 = Pml4Index; - MinPdpt = PdptIndex; - MinPdt = PdtIndex; - ReleasePageAddress = Pdt + PdtIndex; - } } + continue; } - if (!PDPTEIgnore) { + if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) { // - // If this PDPT entry has no PDT entries pointer to 4 KByte pages, - // it should only has the entries point to 2 MByte Pages + // It's not 2 MByte page table entry, it should be PD entry + // we will find the entry has the smallest access record value // - Acc = GetAndUpdateAccNum (Pdpt + PdptIndex); + PDPTEIgnore = TRUE; + Acc = GetAndUpdateAccNum (Pdt + PdtIndex); if (Acc < MinAcc) { // - // If the PDPT entry has the smallest access record value, + // If the PD entry has the smallest access record value, // save the Page address to be released // MinAcc = Acc; - MinPml5 = Pml5Index; MinPml4 = Pml4Index; MinPdpt = PdptIndex; - MinPdt = (UINTN)-1; - ReleasePageAddress = Pdpt + PdptIndex; + MinPdt = PdtIndex; + ReleasePageAddress = Pdt + PdtIndex; } } } - } - if (!PML4EIgnore) { - // - // If PML4 entry has no the PDPT entry pointer to 2 MByte pages, - // it should only has the entries point to 1 GByte Pages - // - Acc = GetAndUpdateAccNum (Pml4 + Pml4Index); - if (Acc < MinAcc) { + if (!PDPTEIgnore) { // - // If the PML4 entry has the smallest access record value, - // save the Page address to be released + // If this PDPT entry has no PDT entries pointer to 4 KByte pages, + // it should only has the entries point to 2 MByte Pages // - MinAcc = Acc; - MinPml5 = Pml5Index; - MinPml4 = Pml4Index; - MinPdpt = (UINTN)-1; - MinPdt = (UINTN)-1; - ReleasePageAddress = Pml4 + Pml4Index; + Acc = GetAndUpdateAccNum (Pdpt + PdptIndex); + if (Acc < MinAcc) { + // + // If the PDPT entry has the smallest access record value, + // save the Page address to be released + // + MinAcc = Acc; + MinPml4 = Pml4Index; + MinPdpt = PdptIndex; + MinPdt = (UINTN)-1; + ReleasePageAddress = Pdpt + PdptIndex; + } } } } + if (!PML4EIgnore) { + // + // If PML4 entry has no the PDPT entry pointer to 2 MByte pages, + // it should only has the entries point to 1 GByte Pages + // + Acc = GetAndUpdateAccNum (Pml4 + Pml4Index); + if (Acc < MinAcc) { + // + // If the PML4 entry has the smallest access record value, + // save the Page address to be released + // + MinAcc = Acc; + MinPml4 = Pml4Index; + MinPdpt = (UINTN)-1; + MinPdt = (UINTN)-1; + ReleasePageAddress = Pml4 + Pml4Index; + } + } } // // Make sure one PML4/PDPT/PD 
entry is selected @@ -705,7 +588,6 @@ ReclaimPages ( // // If 4 KByte Page Table is released, check the PDPT entry // - Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask); Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask); SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt); if (SubEntriesNum == 0) { @@ -797,7 +679,7 @@ SmiDefaultPFHandler ( ) { UINT64 *PageTable; - UINT64 *PageTableTop; + UINT64 *Pml4; UINT64 PFAddress; UINTN StartBit; UINTN EndBit; @@ -808,8 +690,6 @@ SmiDefaultPFHandler ( UINTN PageAttribute; EFI_STATUS Status; UINT64 *UpperEntry; - BOOLEAN Enable5LevelPaging; - IA32_CR4 Cr4; // // Set default SMM page attribute @@ -819,12 +699,9 @@ SmiDefaultPFHandler ( PageAttribute = 0; EndBit = 0; - PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask); + Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask); PFAddress = AsmReadCr2 (); - Cr4.UintN = AsmReadCr4 (); - Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0); - Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute); // // If platform not support page table attribute, set default SMM page attribute @@ -878,9 +755,9 @@ SmiDefaultPFHandler ( } for (Index = 0; Index < NumOfPages; Index++) { - PageTable = PageTableTop; + PageTable = Pml4; UpperEntry = NULL; - for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) { + for (StartBit = 39; StartBit > EndBit; StartBit -= 9) { PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8); if ((PageTable[PTIndex] & IA32_PG_P) == 0) { // @@ -1064,20 +941,13 @@ SetPageTableAttributes ( UINTN Index2; UINTN Index3; UINTN Index4; - UINTN Index5; UINT64 *L1PageTable; UINT64 *L2PageTable; UINT64 *L3PageTable; UINT64 *L4PageTable; - UINT64 *L5PageTable; BOOLEAN IsSplitted; BOOLEAN PageTableSplitted; BOOLEAN CetEnabled; - IA32_CR4 Cr4; - BOOLEAN Enable5LevelPaging; - - Cr4.UintN = AsmReadCr4 (); - Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1); // // Don't do this if @@ -1121,59 +991,44 @@ SetPageTableAttributes ( do { DEBUG ((DEBUG_INFO, "Start...\n")); PageTableSplitted = FALSE; - L5PageTable = NULL; - if (Enable5LevelPaging) { - L5PageTable = (UINT64 *)GetPageTableBase (); - SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); - PageTableSplitted = (PageTableSplitted || IsSplitted); - } - for (Index5 = 0; Index5 < (Enable5LevelPaging ? 
SIZE_4KB/sizeof(UINT64) : 1); Index5++) { - if (Enable5LevelPaging) { - L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); - if (L4PageTable == NULL) { - continue; - } - } else { - L4PageTable = (UINT64 *)GetPageTableBase (); + L4PageTable = (UINT64 *)GetPageTableBase (); + SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); + PageTableSplitted = (PageTableSplitted || IsSplitted); + + for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) { + L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); + if (L3PageTable == NULL) { + continue; } - SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); + + SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); PageTableSplitted = (PageTableSplitted || IsSplitted); - for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) { - L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); - if (L3PageTable == NULL) { + for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) { + if ((L3PageTable[Index3] & IA32_PG_PS) != 0) { + // 1G + continue; + } + L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); + if (L2PageTable == NULL) { continue; } - SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); + SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); PageTableSplitted = (PageTableSplitted || IsSplitted); - for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) { - if ((L3PageTable[Index3] & IA32_PG_PS) != 0) { - // 1G + for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) { + if ((L2PageTable[Index2] & IA32_PG_PS) != 0) { + // 2M continue; } - L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); - if (L2PageTable == NULL) { + L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); + if (L1PageTable == NULL) { continue; } - - SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); + SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); PageTableSplitted = (PageTableSplitted || IsSplitted); - - for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) { - if ((L2PageTable[Index2] & IA32_PG_PS) != 0) { - // 2M - continue; - } - L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); - if (L1PageTable == NULL) { - continue; - } - SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); - PageTableSplitted = (PageTableSplitted || IsSplitted); - } } } } diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm index 271492a9d7..741e4b7da2 100644 --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm @@ -69,7 +69,6 @@ extern ASM_PFX(mXdSupported) global ASM_PFX(gPatchXdSupported) global ASM_PFX(gPatchSmiStack) global ASM_PFX(gPatchSmiCr3) -global ASM_PFX(gPatch5LevelPagingSupport) global ASM_PFX(gcSmiHandlerTemplate) global ASM_PFX(gcSmiHandlerSize) @@ -125,17 +124,6 @@ ProtFlatMode: ASM_PFX(gPatchSmiCr3): mov 
cr3, rax
     mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
-
-    mov cl, strict byte 0 ; source operand will be patched
-ASM_PFX(gPatch5LevelPagingSupport):
-    cmp cl, 0
-    je SkipEnable5LevelPaging
-    ;
-    ; Enable 5-Level Paging bit
-    ;
-    bts eax, 12 ; Set LA57 bit (bit #12)
-SkipEnable5LevelPaging:
-
     mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
 ; Load TSS
     sub esp, 8 ; reserve room in stack
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index 63bae5a913..e7c78d36fc 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -1,7 +1,7 @@
 /** @file
 X64 processor specific functions to enable SMM profile.
 
-Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.
 Copyright (c) 2017, AMD Incorporated. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent @@ -147,14 +147,9 @@ RestorePageTableAbove4G ( BOOLEAN Existed; UINTN Index; UINTN PFIndex; - IA32_CR4 Cr4; - BOOLEAN Enable5LevelPaging; ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL)); - Cr4.UintN = AsmReadCr4 (); - Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1); - // // If page fault address is 4GB above. // @@ -166,48 +161,38 @@ RestorePageTableAbove4G ( // Existed = FALSE; PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK); - PTIndex = 0; - if (Enable5LevelPaging) { - PTIndex = BitFieldRead64 (PFAddress, 48, 56); - } - if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) { - // PML5E - if (Enable5LevelPaging) { - PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - } - PTIndex = BitFieldRead64 (PFAddress, 39, 47); + PTIndex = BitFieldRead64 (PFAddress, 39, 47); + if ((PageTable[PTIndex] & IA32_PG_P) != 0) { + // PML4E + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); + PTIndex = BitFieldRead64 (PFAddress, 30, 38); if ((PageTable[PTIndex] & IA32_PG_P) != 0) { - // PML4E + // PDPTE PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - PTIndex = BitFieldRead64 (PFAddress, 30, 38); - if ((PageTable[PTIndex] & IA32_PG_P) != 0) { - // PDPTE - PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - PTIndex = BitFieldRead64 (PFAddress, 21, 29); - // PD - if ((PageTable[PTIndex] & IA32_PG_PS) != 0) { + PTIndex = BitFieldRead64 (PFAddress, 21, 29); + // PD + if ((PageTable[PTIndex] & IA32_PG_PS) != 0) { + // + // 2MB page + // + Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); + if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) { + Existed = TRUE; + } + } else { + // + // 4KB page + // + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK); + if (PageTable != 0) { // - // 2MB page + // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB. // + PTIndex = BitFieldRead64 (PFAddress, 12, 20); Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) { + if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) { Existed = TRUE; } - } else { - // - // 4KB page - // - PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK); - if (PageTable != 0) { - // - // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB. - // - PTIndex = BitFieldRead64 (PFAddress, 12, 20); - Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) { - Existed = TRUE; - } - } } } } @@ -236,11 +221,6 @@ RestorePageTableAbove4G ( // PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK); PFAddress = AsmReadCr2 (); - // PML5E - if (Enable5LevelPaging) { - PTIndex = BitFieldRead64 (PFAddress, 48, 56); - PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); - } // PML4E PTIndex = BitFieldRead64 (PFAddress, 39, 47); PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK); -- cgit v1.2.3