author | Dun Tan <dun.tan@intel.com> | 2024-07-26 15:32:13 +0800 |
---|---|---|
committer | mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> | 2024-07-29 03:48:53 +0000 |
commit | 51edd4830d822e70b96a8548d0d89383e12bc4c0 (patch) | |
tree | f36845ab181eae192b784a3f9bc71cc6b6f2c7e5 | |
parent | ecb1d67775a6dcaa9a0deb4869ca60e250987e91 (diff) | |
UefiCpuPkg: fix issue when SMM profile is enabled
This commit fixes an SMM code assert issue that occurs when SMM
Profile is enabled.
When SMM Profile is enabled, the function InitProtectedMemRange()
retrieves MMIO ranges from GCD and stores them in
mProtectionMemRange. At ReadyToLock, the function InitPaging()
modifies the page table based on mProtectionMemRange. If an MMIO
range in mProtectionMemRange is not 4KB aligned, the code asserts
while modifying the page table.
In this commit, MMIO ranges whose BaseAddress or Length is not 4KB
aligned are skipped when creating mProtectionMemRange. The only
consequence is that each access to a skipped MMIO range is logged.
In the current failure case on QEMU and QSP SimicsOpenBoard, the
skipped MMIO range is [0xFED00000, 0xFED00400] for the HPET. Since
the probability of the HPET MMIO range being accessed in SMM is very
small, this solution is simple and acceptable.
Signed-off-by: Dun Tan <dun.tan@intel.com>
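To make the filter concrete outside of the edk2 tree, the sketch below restates the 4KB-granularity check in standalone C. MmioRangeIsPageAligned is a hypothetical helper introduced only for illustration, and the SIZE_4KB / ADDRESS_IS_ALIGNED definitions are local stand-ins mirroring the MdePkg Base.h macros; this is not the PiSmmCpuDxeSmm code itself.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the edk2 SIZE_4KB constant and the
   ADDRESS_IS_ALIGNED() macro (semantics mirror MdePkg Base.h). */
#define SIZE_4KB  0x1000ULL
#define ADDRESS_IS_ALIGNED(Address, Alignment)  \
  ((((uint64_t)(Address)) & ((Alignment) - 1)) == 0)

/* Returns true when an MMIO range may be added to mProtectionMemRange:
   both the base address and the length must be 4KB granular, because
   4KB is the smallest page-table mapping granularity. */
static bool
MmioRangeIsPageAligned (uint64_t BaseAddress, uint64_t Length)
{
  return ADDRESS_IS_ALIGNED (BaseAddress, SIZE_4KB) &&
         ((Length % SIZE_4KB) == 0);
}

int
main (void)
{
  /* The HPET range cited in the commit message: the base is aligned,
     but the 0x400-byte length is not a multiple of 4KB, so the range
     would be skipped (and accesses to it merely logged). */
  uint64_t  Base   = 0xFED00000ULL;
  uint64_t  Length = 0x400ULL;

  if (!MmioRangeIsPageAligned (Base, Length)) {
    printf ("MMIO range [0x%llx, 0x%llx] would be skipped (not 4k aligned).\n",
            (unsigned long long)Base,
            (unsigned long long)(Base + Length));
  }

  return 0;
}
```

Requiring both the base and the length to be 4KB granular matches the smallest page-table mapping granularity, so the page-table update never has to map a partial 4KB page.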
-rw-r--r-- | UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c | 36 |
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
index 692aad2d15..5c0f9b4a3f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -438,8 +438,23 @@ InitProtectedMemRange (
           &MemorySpaceMap
);
for (Index = 0; Index < NumberOfDescriptors; Index++) {
- if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
- NumberOfAddedDescriptors++;
+ if ((MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo)) {
+ if (ADDRESS_IS_ALIGNED (MemorySpaceMap[Index].BaseAddress, SIZE_4KB) &&
+ (MemorySpaceMap[Index].Length % SIZE_4KB == 0))
+ {
+ NumberOfAddedDescriptors++;
+ } else {
+ //
+ // Skip the MMIO range that BaseAddress and Length are not 4k aligned since
+ // the minimum granularity of the page table is 4k
+ //
+ DEBUG ((
+ DEBUG_WARN,
+ "MMIO range [0x%lx, 0x%lx] is skipped since it is not 4k aligned.\n",
+ MemorySpaceMap[Index].BaseAddress,
+ MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length
+ ));
+ }
}
}
@@ -486,15 +501,16 @@ InitProtectedMemRange (
  //
  // Create MMIO ranges which are set to present and execution-disable.
//
for (Index = 0; Index < NumberOfDescriptors; Index++) {
- if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
- continue;
+ if ((MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) &&
+ ADDRESS_IS_ALIGNED (MemorySpaceMap[Index].BaseAddress, SIZE_4KB) &&
+ (MemorySpaceMap[Index].Length % SIZE_4KB == 0))
+ {
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
+ NumberOfProtectRange++;
}
-
- mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
- mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
- mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
- mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
- NumberOfProtectRange++;
}
//
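As a rough illustration of the behavior on the failure case described in the commit message, the new else branch would emit a warning along the lines of "MMIO range [0xFED00000, 0xFED00400] is skipped since it is not 4k aligned.", and subsequent SMM accesses to that range would only be logged by SMM Profile rather than being pre-mapped via mProtectionMemRange.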