author    Dun Tan <dun.tan@intel.com>  2023-05-15 15:47:54 +0800
committer Ray Ni <ray.ni@intel.com>    2023-06-30 11:07:40 +0530
commit    701b5797b260cbc9477380beb7fb071f3c5c88d1 (patch)
tree      b1c0fe99e3fd819136fb50c48fd0c4a35760ee4e
parent    d706d9c64ac6074f3066f0531b66f59f4991973b (diff)
UefiCpuPkg: Add GenSmmPageTable() to create smm page table
This commit refines the current SMM page table generation code. It adds a new GenSmmPageTable() API that creates the SMM page table based on the PageTableMap() API in CpuPageTableLib. The caller only needs to specify the paging mode and the PhysicalAddressBits to map. The function can be used to create both IA32 PAE paging and X64 5-level/4-level paging.

Signed-off-by: Dun Tan <dun.tan@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
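For illustration only (not part of the patch), a minimal sketch of how a caller might use the new API. It assumes the PAGING_MODE values defined by CpuPageTableLib and the driver globals (mPhysicalAddressBits, m5LevelPagingNeeded) visible in the diff below; note that the X64 code in the patch actually passes mPagingMode, which may also select one of the 1GB-page variants.

  //
  // Usage sketch; loosely mirrors the IA32 and X64 call sites in this patch.
  //
  UINTN  PageTable;

  // IA32: build a PAE page table covering the full physical address space.
  PageTable = GenSmmPageTable (PagingPae, mPhysicalAddressBits);

  // X64: choose 5-level or 4-level paging to match what the CPU supports.
  PageTable = GenSmmPageTable (
                m5LevelPagingNeeded ? Paging5Level : Paging4Level,
                mPhysicalAddressBits
                );

  // The returned value is the page table root, suitable for loading into CR3.
  AsmWriteCr3 (PageTable);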
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c              2
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h           15
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c   65
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c             220
4 files changed, 107 insertions(+), 195 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
index 9c8107080a..b11264ce4a 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -63,7 +63,7 @@ SmmInitPageTable (
InitializeIDTSmmStackGuard ();
}
- return Gen4GPageTable (TRUE);
+ return GenSmmPageTable (PagingPae, mPhysicalAddressBits);
}
/**
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index 7326c13d01..1989babc67 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -556,6 +556,21 @@ Gen4GPageTable (
);
/**
+ Create the SMM page table based on the input PagingMode and PhysicalAddressBits.
+
+ @param[in] PagingMode The paging mode.
+ @param[in] PhysicalAddressBits The bits of physical address to map.
+
+ @return The address of the created page table.
+
+**/
+UINTN
+GenSmmPageTable (
+ IN PAGING_MODE PagingMode,
+ IN UINT8 PhysicalAddressBits
+ );
+
+/**
Initialize global data for MP synchronization.
@param Stacks Base address of SMI stack buffer for all processors.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index 8ed4940076..13fc3903fb 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -1647,6 +1647,71 @@ EdkiiSmmClearMemoryAttributes (
}
/**
+ Create the SMM page table based on the input PagingMode and PhysicalAddressBits.
+
+ @param[in] PagingMode The paging mode.
+ @param[in] PhysicalAddressBits The bits of physical address to map.
+
+ @return The address of the created page table.
+
+**/
+UINTN
+GenSmmPageTable (
+ IN PAGING_MODE PagingMode,
+ IN UINT8 PhysicalAddressBits
+ )
+{
+ UINTN PageTableBufferSize;
+ UINTN PageTable;
+ VOID *PageTableBuffer;
+ IA32_MAP_ATTRIBUTE MapAttribute;
+ IA32_MAP_ATTRIBUTE MapMask;
+ RETURN_STATUS Status;
+ UINTN GuardPage;
+ UINTN Index;
+ UINT64 Length;
+
+ Length = LShiftU64 (1, PhysicalAddressBits);
+ PageTable = 0;
+ PageTableBufferSize = 0;
+ MapMask.Uint64 = MAX_UINT64;
+ MapAttribute.Uint64 = mAddressEncMask;
+ MapAttribute.Bits.Present = 1;
+ MapAttribute.Bits.ReadWrite = 1;
+ MapAttribute.Bits.UserSupervisor = 1;
+ MapAttribute.Bits.Accessed = 1;
+ MapAttribute.Bits.Dirty = 1;
+
+ Status = PageTableMap (&PageTable, PagingMode, NULL, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
+ ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
+ DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
+ PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
+ ASSERT (PageTableBuffer != NULL);
+ Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
+ ASSERT (Status == RETURN_SUCCESS);
+ ASSERT (PageTableBufferSize == 0);
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Mark the 4KB guard page between known good stack and smm stack as non-present
+ //
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize);
+ Status = ConvertMemoryPageAttributes (PageTable, PagingMode, GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+ }
+ }
+
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
+ //
+ // Mark [0, 4k] as non-present
+ //
+ Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+ }
+
+ return (UINTN)PageTable;
+}
+
+/**
This function retrieves the attributes of the memory region specified by
BaseAddress and Length. If different attributes are got from different part
of the memory region, EFI_NO_MAPPING will be returned.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 46d8ff5d4e..ddd9be66b5 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -168,160 +168,6 @@ CalculateMaximumSupportAddress (
}
/**
- Set static page table.
-
- @param[in] PageTable Address of page table.
- @param[in] PhysicalAddressBits The maximum physical address bits supported.
-**/
-VOID
-SetStaticPageTable (
- IN UINTN PageTable,
- IN UINT8 PhysicalAddressBits
- )
-{
- UINT64 PageAddress;
- UINTN NumberOfPml5EntriesNeeded;
- UINTN NumberOfPml4EntriesNeeded;
- UINTN NumberOfPdpEntriesNeeded;
- UINTN IndexOfPml5Entries;
- UINTN IndexOfPml4Entries;
- UINTN IndexOfPdpEntries;
- UINTN IndexOfPageDirectoryEntries;
- UINT64 *PageMapLevel5Entry;
- UINT64 *PageMapLevel4Entry;
- UINT64 *PageMap;
- UINT64 *PageDirectoryPointerEntry;
- UINT64 *PageDirectory1GEntry;
- UINT64 *PageDirectoryEntry;
-
- //
- // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
- // when 5-Level Paging is disabled.
- //
- ASSERT (PhysicalAddressBits <= 52);
- if (!m5LevelPagingNeeded && (PhysicalAddressBits > 48)) {
- PhysicalAddressBits = 48;
- }
-
- NumberOfPml5EntriesNeeded = 1;
- if (PhysicalAddressBits > 48) {
- NumberOfPml5EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 48);
- PhysicalAddressBits = 48;
- }
-
- NumberOfPml4EntriesNeeded = 1;
- if (PhysicalAddressBits > 39) {
- NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 39);
- PhysicalAddressBits = 39;
- }
-
- NumberOfPdpEntriesNeeded = 1;
- ASSERT (PhysicalAddressBits > 30);
- NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, PhysicalAddressBits - 30);
-
- //
- // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
- //
- PageMap = (VOID *)PageTable;
-
- PageMapLevel4Entry = PageMap;
- PageMapLevel5Entry = NULL;
- if (m5LevelPagingNeeded) {
- //
- // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
- //
- PageMapLevel5Entry = PageMap;
- }
-
- PageAddress = 0;
-
- for ( IndexOfPml5Entries = 0
- ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
- ; IndexOfPml5Entries++, PageMapLevel5Entry++)
- {
- //
- // Each PML5 entry points to a page of PML4 entries.
- // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
- // When 5-Level Paging is disabled, the allocation below happens only once.
- //
- if (m5LevelPagingNeeded) {
- PageMapLevel4Entry = (UINT64 *)((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
- if (PageMapLevel4Entry == NULL) {
- PageMapLevel4Entry = AllocatePageTableMemory (1);
- ASSERT (PageMapLevel4Entry != NULL);
- ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE (1));
-
- *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- }
- }
-
- for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
- //
- // Each PML4 entry points to a page of Page Directory Pointer entries.
- //
- PageDirectoryPointerEntry = (UINT64 *)((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
- if (PageDirectoryPointerEntry == NULL) {
- PageDirectoryPointerEntry = AllocatePageTableMemory (1);
- ASSERT (PageDirectoryPointerEntry != NULL);
- ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE (1));
-
- *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- }
-
- if (m1GPageTableSupport) {
- PageDirectory1GEntry = PageDirectoryPointerEntry;
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
- if ((IndexOfPml4Entries == 0) && (IndexOfPageDirectoryEntries < 4)) {
- //
- // Skip the < 4G entries
- //
- continue;
- }
-
- //
- // Fill in the Page Directory entries
- //
- *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
- }
- } else {
- PageAddress = BASE_4GB;
- for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
- if ((IndexOfPml4Entries == 0) && (IndexOfPdpEntries < 4)) {
- //
- // Skip the < 4G entries
- //
- continue;
- }
-
- //
- // Each Directory Pointer entry points to a page of Page Directory entries.
- // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
- //
- PageDirectoryEntry = (UINT64 *)((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
- if (PageDirectoryEntry == NULL) {
- PageDirectoryEntry = AllocatePageTableMemory (1);
- ASSERT (PageDirectoryEntry != NULL);
- ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE (1));
-
- //
- // Fill in the Page Directory Pointer entry
- //
- *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- }
-
- for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
- //
- // Fill in the Page Directory entries
- //
- *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
- }
- }
- }
- }
- }
-}
-
-/**
Create PageTable for SMM use.
@return The address of PML4 (to set CR3).
@@ -332,15 +178,16 @@ SmmInitPageTable (
VOID
)
{
- EFI_PHYSICAL_ADDRESS Pages;
- UINT64 *PTEntry;
+ UINTN PageTable;
LIST_ENTRY *FreePage;
UINTN Index;
UINTN PageFaultHandlerHookAddress;
IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
EFI_STATUS Status;
+ UINT64 *PdptEntry;
UINT64 *Pml4Entry;
UINT64 *Pml5Entry;
+ UINT8 PhysicalAddressBits;
//
// Initialize spin lock
@@ -357,59 +204,44 @@ SmmInitPageTable (
} else {
mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
}
+
DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
- //
- // Generate PAE page table for the first 4GB memory space
- //
- Pages = Gen4GPageTable (FALSE);
//
- // Set IA32_PG_PMNT bit to mask this entry
+ // Generate initial SMM page table.
+ // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
//
- PTEntry = (UINT64 *)(UINTN)Pages;
- for (Index = 0; Index < 4; Index++) {
- PTEntry[Index] |= IA32_PG_PMNT;
- }
-
- //
- // Fill Page-Table-Level4 (PML4) entry
- //
- Pml4Entry = (UINT64 *)AllocatePageTableMemory (1);
- ASSERT (Pml4Entry != NULL);
- *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
-
- //
- // Set sub-entries number
- //
- SetSubEntriesNum (Pml4Entry, 3);
- PTEntry = Pml4Entry;
+ PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
+ PageTable = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
if (m5LevelPagingNeeded) {
+ Pml5Entry = (UINT64 *)PageTable;
//
- // Fill PML5 entry
- //
- Pml5Entry = (UINT64 *)AllocatePageTableMemory (1);
- ASSERT (Pml5Entry != NULL);
- *Pml5Entry = (UINTN)Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
- //
- // Set sub-entries number
+ // Set Pml5Entry sub-entries number for smm PF handler usage.
//
SetSubEntriesNum (Pml5Entry, 1);
- PTEntry = Pml5Entry;
+ Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
+ } else {
+ Pml4Entry = (UINT64 *)PageTable;
+ }
+
+ //
+ // Set the IA32_PG_PMNT bit to mask the first 4 PDPT entries.
+ //
+ PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
+ for (Index = 0; Index < 4; Index++) {
+ PdptEntry[Index] |= IA32_PG_PMNT;
}
- if (mCpuSmmRestrictedMemoryAccess) {
+ if (!mCpuSmmRestrictedMemoryAccess) {
//
- // When access to non-SMRAM memory is restricted, create page table
- // that covers all memory space.
+ // Set Pml4Entry sub-entries number for smm PF handler usage.
//
- SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
- } else {
+ SetSubEntriesNum (Pml4Entry, 3);
+
//
// Add pages to page pool
//
@@ -466,7 +298,7 @@ SmmInitPageTable (
//
// Return the address of PML4/PML5 (to set CR3)
//
- return (UINT32)(UINTN)PTEntry;
+ return (UINT32)PageTable;
}
/**