summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c116
1 files changed, 92 insertions, 24 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index 12f3c0b8e8..b8c356bfe8 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -1647,49 +1647,115 @@ EdkiiSmmClearMemoryAttributes (
}
/**
- Create page table based on input PagingMode and PhysicalAddressBits in smm.
-
- @param[in] PagingMode The paging mode.
- @param[in] PhysicalAddressBits The bits of physical address to map.
+ Create page table based on input PagingMode, LinearAddress and Length.
- @retval PageTable Address
+ @param[in, out] PageTable The pointer to the page table.
+ @param[in] PagingMode The paging mode.
+ @param[in] LinearAddress The start of the linear address range.
+ @param[in] Length The length of the linear address range.
**/
-UINTN
-GenSmmPageTable (
- IN PAGING_MODE PagingMode,
- IN UINT8 PhysicalAddressBits
+VOID
+GenPageTable (
+ IN OUT UINTN *PageTable,
+ IN PAGING_MODE PagingMode,
+ IN UINT64 LinearAddress,
+ IN UINT64 Length
)
{
+ RETURN_STATUS Status;
UINTN PageTableBufferSize;
- UINTN PageTable;
VOID *PageTableBuffer;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
- RETURN_STATUS Status;
- UINTN GuardPage;
- UINTN Index;
- UINT64 Length;
- Length = LShiftU64 (1, PhysicalAddressBits);
- PageTable = 0;
- PageTableBufferSize = 0;
MapMask.Uint64 = MAX_UINT64;
- MapAttribute.Uint64 = mAddressEncMask;
+ MapAttribute.Uint64 = mAddressEncMask|LinearAddress;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapAttribute.Bits.UserSupervisor = 1;
MapAttribute.Bits.Accessed = 1;
MapAttribute.Bits.Dirty = 1;
+ PageTableBufferSize = 0;
+
+ Status = PageTableMap (
+ PageTable,
+ PagingMode,
+ NULL,
+ &PageTableBufferSize,
+ LinearAddress,
+ Length,
+ &MapAttribute,
+ &MapMask,
+ NULL
+ );
+ if (Status == RETURN_BUFFER_TOO_SMALL) {
+ DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
+ PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
+ ASSERT (PageTableBuffer != NULL);
+ Status = PageTableMap (
+ PageTable,
+ PagingMode,
+ PageTableBuffer,
+ &PageTableBufferSize,
+ LinearAddress,
+ Length,
+ &MapAttribute,
+ &MapMask,
+ NULL
+ );
+ }
- Status = PageTableMap (&PageTable, PagingMode, NULL, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
- ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
- DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
- PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
- ASSERT (PageTableBuffer != NULL);
- Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
ASSERT (Status == RETURN_SUCCESS);
ASSERT (PageTableBufferSize == 0);
+}
+
+/**
+ Create page table based on input PagingMode and PhysicalAddressBits in smm.
+
+ @param[in] PagingMode The paging mode.
+ @param[in] PhysicalAddressBits The bits of physical address to map.
+
+ @retval PageTable Address
+
+**/
+UINTN
+GenSmmPageTable (
+ IN PAGING_MODE PagingMode,
+ IN UINT8 PhysicalAddressBits
+ )
+{
+ UINTN PageTable;
+ RETURN_STATUS Status;
+ UINTN GuardPage;
+ UINTN Index;
+ UINT64 Length;
+ PAGING_MODE SmramPagingMode;
+
+ PageTable = 0;
+ Length = LShiftU64 (1, PhysicalAddressBits);
+ ASSERT (Length > mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize);
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ SmramPagingMode = m5LevelPagingNeeded ? Paging5Level4KB : Paging4Level4KB;
+ } else {
+ SmramPagingMode = PagingPae4KB;
+ }
+
+ ASSERT (mCpuHotPlugData.SmrrBase % SIZE_4KB == 0);
+ ASSERT (mCpuHotPlugData.SmrrSize % SIZE_4KB == 0);
+ GenPageTable (&PageTable, PagingMode, 0, mCpuHotPlugData.SmrrBase);
+
+ //
+ // Map the SMRAM range at 4K page granularity to avoid having to split pages later, when SMM is
+ // ready to lock. If the BSP were to split a 1G/2M paging entry into 512 2M/4K paging entries
+ // while the APs are still running in SMI, an AP might access the affected linear-address range
+ // between the time of the modification and the time of the TLB invalidation, which could
+ // potentially lead to an exception.
+ //
+ GenPageTable (&PageTable, SmramPagingMode, mCpuHotPlugData.SmrrBase, mCpuHotPlugData.SmrrSize);
+
+ GenPageTable (&PageTable, PagingMode, mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize, Length - mCpuHotPlugData.SmrrBase - mCpuHotPlugData.SmrrSize);
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
//
@@ -1698,6 +1764,7 @@ GenSmmPageTable (
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize);
Status = ConvertMemoryPageAttributes (PageTable, PagingMode, GuardPage, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+ ASSERT (Status == RETURN_SUCCESS);
}
}
@@ -1706,6 +1773,7 @@ GenSmmPageTable (
// Mark [0, 4k] as non-present
//
Status = ConvertMemoryPageAttributes (PageTable, PagingMode, 0, SIZE_4KB, EFI_MEMORY_RP, TRUE, NULL);
+ ASSERT (Status == RETURN_SUCCESS);
}
return (UINTN)PageTable;