author     Jian J Wang <jian.j.wang@intel.com>   2017-11-20 16:08:28 +0800
committer  Hao Wu <hao.a.wu@intel.com>           2017-11-21 20:24:37 +0800
commit     e63da9f033274843163908ccefa95c892d7944e5
tree       4e23d1922ae238b3c821662ae8b1e6cf841c7488
parent     a89b923ea90ed178f74df42ae344cc0a3b24380b
MdeModulePkg: Fix unix style of EOL
Cc: Wu Hao <hao.a.wu@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Jian J Wang <jian.j.wang@intel.com>
Reviewed-by: Hao Wu <hao.a.wu@intel.com>
Diffstat (limited to 'MdeModulePkg/Core/Dxe')
-rw-r--r--  MdeModulePkg/Core/Dxe/DxeMain.inf              |    8
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/HeapGuard.c          | 2394
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/HeapGuard.h          |  788
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Imem.h               |   70
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Page.c               |  210
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Pool.c               |  242
-rw-r--r--  MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c  |    6
7 files changed, 1859 insertions, 1859 deletions
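
The whole-file hunks below come from a line-ending-only rewrite: every line of the touched sources appears once as removed and once as re-added with identical content. Assuming the fix converts the LF-only files to the CRLF convention used elsewhere in the tree, a minimal conversion sketch in Python (illustrative only, not part of this commit) could look like:

    # Illustrative sketch (assumed workflow, not part of this commit):
    # rewrite LF line endings as CRLF without doubling existing CR bytes.
    from pathlib import Path

    def lf_to_crlf(filename):
        data = Path(filename).read_bytes()
        data = data.replace(b"\r\n", b"\n")   # normalize any existing CRLF first
        data = data.replace(b"\n", b"\r\n")   # then re-emit every newline as CRLF
        Path(filename).write_bytes(data)

    for name in ("MdeModulePkg/Core/Dxe/Mem/HeapGuard.c",
                 "MdeModulePkg/Core/Dxe/Mem/HeapGuard.h"):
        lf_to_crlf(name)
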
diff --git a/MdeModulePkg/Core/Dxe/DxeMain.inf b/MdeModulePkg/Core/Dxe/DxeMain.inf
index 9793333a44..f2155fcab1 100644
--- a/MdeModulePkg/Core/Dxe/DxeMain.inf
+++ b/MdeModulePkg/Core/Dxe/DxeMain.inf
@@ -56,7 +56,7 @@
Mem/MemData.c
Mem/Imem.h
Mem/MemoryProfileRecord.c
- Mem/HeapGuard.c
+ Mem/HeapGuard.c
FwVolBlock/FwVolBlock.c
FwVolBlock/FwVolBlock.h
FwVol/FwVolWrite.c
@@ -194,9 +194,9 @@
gEfiMdeModulePkgTokenSpaceGuid.PcdImageProtectionPolicy ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdDxeNxMemoryProtectionPolicy ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask ## CONSUMES
- gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPageType ## CONSUMES
- gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPoolType ## CONSUMES
- gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPageType ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPoolType ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask ## CONSUMES
# [Hob]
# RESOURCE_DESCRIPTOR ## CONSUMES
diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
index 752befa44d..30a73fc04d 100644
--- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
+++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
@@ -1,1197 +1,1197 @@
-/** @file
- UEFI Heap Guard functions.
-
-Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
-This program and the accompanying materials
-are licensed and made available under the terms and conditions of the BSD License
-which accompanies this distribution. The full text of the license may be found at
-http://opensource.org/licenses/bsd-license.php
-
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-**/
-
-#include "DxeMain.h"
-#include "Imem.h"
-#include "HeapGuard.h"
-
-//
-// Global to avoid infinite reentrance of memory allocation when updating
-// page table attributes, which may need allocate pages for new PDE/PTE.
-//
-GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
-
-//
-// Pointer to table tracking the Guarded memory with bitmap, in which '1'
-// is used to indicate memory guarded. '0' might be free memory or Guard
-// page itself, depending on status of memory adjacent to it.
-//
-GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
-
-//
-// Current depth level of map table pointed by mGuardedMemoryMap.
-// mMapLevel must be initialized at least by 1. It will be automatically
-// updated according to the address of memory just tracked.
-//
-GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
-
-//
-// Shift and mask for each level of map table
-//
-GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
- = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
-GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
- = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
-
-/**
- Set corresponding bits in bitmap table to 1 according to the address.
-
- @param[in] Address Start address to set for.
- @param[in] BitNumber Number of bits to set.
- @param[in] BitMap Pointer to bitmap which covers the Address.
-
- @return VOID.
-**/
-STATIC
-VOID
-SetBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN BitNumber,
- IN UINT64 *BitMap
- )
-{
- UINTN Lsbs;
- UINTN Qwords;
- UINTN Msbs;
- UINTN StartBit;
- UINTN EndBit;
-
- StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
- EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
-
- if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
- Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
- GUARDED_HEAP_MAP_ENTRY_BITS;
- Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
- Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
- } else {
- Msbs = BitNumber;
- Lsbs = 0;
- Qwords = 0;
- }
-
- if (Msbs > 0) {
- *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
- BitMap += 1;
- }
-
- if (Qwords > 0) {
- SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
- (UINT64)-1);
- BitMap += Qwords;
- }
-
- if (Lsbs > 0) {
- *BitMap |= (LShiftU64 (1, Lsbs) - 1);
- }
-}
-
-/**
- Set corresponding bits in bitmap table to 0 according to the address.
-
- @param[in] Address Start address to set for.
- @param[in] BitNumber Number of bits to set.
- @param[in] BitMap Pointer to bitmap which covers the Address.
-
- @return VOID.
-**/
-STATIC
-VOID
-ClearBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN BitNumber,
- IN UINT64 *BitMap
- )
-{
- UINTN Lsbs;
- UINTN Qwords;
- UINTN Msbs;
- UINTN StartBit;
- UINTN EndBit;
-
- StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
- EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
-
- if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
- Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
- GUARDED_HEAP_MAP_ENTRY_BITS;
- Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
- Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
- } else {
- Msbs = BitNumber;
- Lsbs = 0;
- Qwords = 0;
- }
-
- if (Msbs > 0) {
- *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
- BitMap += 1;
- }
-
- if (Qwords > 0) {
- SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
- BitMap += Qwords;
- }
-
- if (Lsbs > 0) {
- *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
- }
-}
-
-/**
- Get corresponding bits in bitmap table according to the address.
-
- The value of bit 0 corresponds to the status of memory at given Address.
- No more than 64 bits can be retrieved in one call.
-
- @param[in] Address Start address to retrieve bits for.
- @param[in] BitNumber Number of bits to get.
- @param[in] BitMap Pointer to bitmap which covers the Address.
-
- @return An integer containing the bits information.
-**/
-STATIC
-UINT64
-GetBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN BitNumber,
- IN UINT64 *BitMap
- )
-{
- UINTN StartBit;
- UINTN EndBit;
- UINTN Lsbs;
- UINTN Msbs;
- UINT64 Result;
-
- ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
-
- StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
- EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
-
- if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
- Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
- Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
- } else {
- Msbs = BitNumber;
- Lsbs = 0;
- }
-
- Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
- if (Lsbs > 0) {
- BitMap += 1;
- Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
- }
-
- return Result;
-}
-
-/**
- Locate the pointer of bitmap from the guarded memory bitmap tables, which
- covers the given Address.
-
- @param[in] Address Start address to search the bitmap for.
- @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
- @param[out] BitMap Pointer to bitmap which covers the Address.
-
- @return The bit number from given Address to the end of current map table.
-**/
-UINTN
-FindGuardedMemoryMap (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN BOOLEAN AllocMapUnit,
- OUT UINT64 **BitMap
- )
-{
- UINTN Level;
- UINT64 *GuardMap;
- UINT64 MapMemory;
- UINTN Index;
- UINTN Size;
- UINTN BitsToUnitEnd;
- EFI_STATUS Status;
-
- //
- // Adjust current map table depth according to the address to access
- //
- while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
- &&
- RShiftU64 (
- Address,
- mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
- ) != 0) {
-
- if (mGuardedMemoryMap != 0) {
- Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
- * GUARDED_HEAP_MAP_ENTRY_BYTES;
- Status = CoreInternalAllocatePages (
- AllocateAnyPages,
- EfiBootServicesData,
- EFI_SIZE_TO_PAGES (Size),
- &MapMemory,
- FALSE
- );
- ASSERT_EFI_ERROR (Status);
- ASSERT (MapMemory != 0);
-
- SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
-
- *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
- mGuardedMemoryMap = MapMemory;
- }
-
- mMapLevel++;
-
- }
-
- GuardMap = &mGuardedMemoryMap;
- for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
- Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
- ++Level) {
-
- if (*GuardMap == 0) {
- if (!AllocMapUnit) {
- GuardMap = NULL;
- break;
- }
-
- Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
- Status = CoreInternalAllocatePages (
- AllocateAnyPages,
- EfiBootServicesData,
- EFI_SIZE_TO_PAGES (Size),
- &MapMemory,
- FALSE
- );
- ASSERT_EFI_ERROR (Status);
- ASSERT (MapMemory != 0);
-
- SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
- *GuardMap = MapMemory;
- }
-
- Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
- Index &= mLevelMask[Level];
- GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
-
- }
-
- BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
- *BitMap = GuardMap;
-
- return BitsToUnitEnd;
-}
-
-/**
- Set corresponding bits in bitmap table to 1 according to given memory range.
-
- @param[in] Address Memory address to guard from.
- @param[in] NumberOfPages Number of pages to guard.
-
- @return VOID.
-**/
-VOID
-EFIAPI
-SetGuardedMemoryBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN NumberOfPages
- )
-{
- UINT64 *BitMap;
- UINTN Bits;
- UINTN BitsToUnitEnd;
-
- while (NumberOfPages > 0) {
- BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
- ASSERT (BitMap != NULL);
-
- if (NumberOfPages > BitsToUnitEnd) {
- // Cross map unit
- Bits = BitsToUnitEnd;
- } else {
- Bits = NumberOfPages;
- }
-
- SetBits (Address, Bits, BitMap);
-
- NumberOfPages -= Bits;
- Address += EFI_PAGES_TO_SIZE (Bits);
- }
-}
-
-/**
- Clear corresponding bits in bitmap table according to given memory range.
-
- @param[in] Address Memory address to unset from.
- @param[in] NumberOfPages Number of pages to unset guard.
-
- @return VOID.
-**/
-VOID
-EFIAPI
-ClearGuardedMemoryBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN NumberOfPages
- )
-{
- UINT64 *BitMap;
- UINTN Bits;
- UINTN BitsToUnitEnd;
-
- while (NumberOfPages > 0) {
- BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
- ASSERT (BitMap != NULL);
-
- if (NumberOfPages > BitsToUnitEnd) {
- // Cross map unit
- Bits = BitsToUnitEnd;
- } else {
- Bits = NumberOfPages;
- }
-
- ClearBits (Address, Bits, BitMap);
-
- NumberOfPages -= Bits;
- Address += EFI_PAGES_TO_SIZE (Bits);
- }
-}
-
-/**
- Retrieve corresponding bits in bitmap table according to given memory range.
-
- @param[in] Address Memory address to retrieve from.
- @param[in] NumberOfPages Number of pages to retrieve.
-
- @return An integer containing the guarded memory bitmap.
-**/
-UINTN
-GetGuardedMemoryBits (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN UINTN NumberOfPages
- )
-{
- UINT64 *BitMap;
- UINTN Bits;
- UINTN Result;
- UINTN Shift;
- UINTN BitsToUnitEnd;
-
- ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
-
- Result = 0;
- Shift = 0;
- while (NumberOfPages > 0) {
- BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
-
- if (NumberOfPages > BitsToUnitEnd) {
- // Cross map unit
- Bits = BitsToUnitEnd;
- } else {
- Bits = NumberOfPages;
- }
-
- if (BitMap != NULL) {
- Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
- }
-
- Shift += Bits;
- NumberOfPages -= Bits;
- Address += EFI_PAGES_TO_SIZE (Bits);
- }
-
- return Result;
-}
-
-/**
- Get bit value in bitmap table for the given address.
-
- @param[in] Address The address to retrieve for.
-
- @return 1 or 0.
-**/
-UINTN
-EFIAPI
-GetGuardMapBit (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- UINT64 *GuardMap;
-
- FindGuardedMemoryMap (Address, FALSE, &GuardMap);
- if (GuardMap != NULL) {
- if (RShiftU64 (*GuardMap,
- GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
- return 1;
- }
- }
-
- return 0;
-}
-
-/**
- Set the bit in bitmap table for the given address.
-
- @param[in] Address The address to set for.
-
- @return VOID.
-**/
-VOID
-EFIAPI
-SetGuardMapBit (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- UINT64 *GuardMap;
- UINT64 BitMask;
-
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);
- if (GuardMap != NULL) {
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
- *GuardMap |= BitMask;
- }
-}
-
-/**
- Clear the bit in bitmap table for the given address.
-
- @param[in] Address The address to clear for.
-
- @return VOID.
-**/
-VOID
-EFIAPI
-ClearGuardMapBit (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- UINT64 *GuardMap;
- UINT64 BitMask;
-
- FindGuardedMemoryMap (Address, TRUE, &GuardMap);
- if (GuardMap != NULL) {
- BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
- *GuardMap &= ~BitMask;
- }
-}
-
-/**
- Check to see if the page at the given address is a Guard page or not.
-
- @param[in] Address The address to check for.
-
- @return TRUE The page at Address is a Guard page.
- @return FALSE The page at Address is not a Guard page.
-**/
-BOOLEAN
-EFIAPI
-IsGuardPage (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- UINTN BitMap;
-
- //
- // There must be at least one guarded page before and/or after given
- // address if it's a Guard page. The bitmap pattern should be one of
- // 001, 100 and 101
- //
- BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
- return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
-}
-
-/**
- Check to see if the page at the given address is a head Guard page or not.
-
- @param[in] Address The address to check for
-
- @return TRUE The page at Address is a head Guard page
- @return FALSE The page at Address is not a head Guard page
-**/
-BOOLEAN
-EFIAPI
-IsHeadGuard (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- return (GetGuardedMemoryBits (Address, 2) == BIT1);
-}
-
-/**
- Check to see if the page at the given address is a tail Guard page or not.
-
- @param[in] Address The address to check for.
-
- @return TRUE The page at Address is a tail Guard page.
- @return FALSE The page at Address is not a tail Guard page.
-**/
-BOOLEAN
-EFIAPI
-IsTailGuard (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
-}
-
-/**
- Check to see if the page at the given address is guarded or not.
-
- @param[in] Address The address to check for.
-
- @return TRUE The page at Address is guarded.
- @return FALSE The page at Address is not guarded.
-**/
-BOOLEAN
-EFIAPI
-IsMemoryGuarded (
- IN EFI_PHYSICAL_ADDRESS Address
- )
-{
- return (GetGuardMapBit (Address) == 1);
-}
-
-/**
- Set the page at the given address to be a Guard page.
-
- This is done by changing the page table attribute to be NOT PRSENT.
-
- @param[in] BaseAddress Page address to Guard at
-
- @return VOID
-**/
-VOID
-EFIAPI
-SetGuardPage (
- IN EFI_PHYSICAL_ADDRESS BaseAddress
- )
-{
- //
- // Set flag to make sure allocating memory without GUARD for page table
- // operation; otherwise infinite loops could be caused.
- //
- mOnGuarding = TRUE;
- //
- // Note: This might overwrite other attributes needed by other features,
- // such as memory protection (NX). Please make sure they are not enabled
- // at the same time.
- //
- gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
- mOnGuarding = FALSE;
-}
-
-/**
- Unset the Guard page at the given address to the normal memory.
-
- This is done by changing the page table attribute to be PRSENT.
-
- @param[in] BaseAddress Page address to Guard at.
-
- @return VOID.
-**/
-VOID
-EFIAPI
-UnsetGuardPage (
- IN EFI_PHYSICAL_ADDRESS BaseAddress
- )
-{
- //
- // Set flag to make sure allocating memory without GUARD for page table
- // operation; otherwise infinite loops could be caused.
- //
- mOnGuarding = TRUE;
- //
- // Note: This might overwrite other attributes needed by other features,
- // such as memory protection (NX). Please make sure they are not enabled
- // at the same time.
- //
- gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, 0);
- mOnGuarding = FALSE;
-}
-
-/**
- Check to see if the memory at the given address should be guarded or not.
-
- @param[in] MemoryType Memory type to check.
- @param[in] AllocateType Allocation type to check.
- @param[in] PageOrPool Indicate a page allocation or pool allocation.
-
-
- @return TRUE The given type of memory should be guarded.
- @return FALSE The given type of memory should not be guarded.
-**/
-BOOLEAN
-IsMemoryTypeToGuard (
- IN EFI_MEMORY_TYPE MemoryType,
- IN EFI_ALLOCATE_TYPE AllocateType,
- IN UINT8 PageOrPool
- )
-{
- UINT64 TestBit;
- UINT64 ConfigBit;
- BOOLEAN InSmm;
-
- if (gCpu == NULL || AllocateType == AllocateAddress) {
- return FALSE;
- }
-
- InSmm = FALSE;
- if (gSmmBase2 != NULL) {
- gSmmBase2->InSmm (gSmmBase2, &InSmm);
- }
-
- if (InSmm) {
- return FALSE;
- }
-
- if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
- return FALSE;
- }
-
- if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
- ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
- } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
- ConfigBit = PcdGet64 (PcdHeapGuardPageType);
- } else {
- ConfigBit = (UINT64)-1;
- }
-
- if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
- TestBit = BIT63;
- } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
- TestBit = BIT62;
- } else if (MemoryType < EfiMaxMemoryType) {
- TestBit = LShiftU64 (1, MemoryType);
- } else if (MemoryType == EfiMaxMemoryType) {
- TestBit = (UINT64)-1;
- } else {
- TestBit = 0;
- }
-
- return ((ConfigBit & TestBit) != 0);
-}
-
-/**
- Check to see if the pool at the given address should be guarded or not.
-
- @param[in] MemoryType Pool type to check.
-
-
- @return TRUE The given type of pool should be guarded.
- @return FALSE The given type of pool should not be guarded.
-**/
-BOOLEAN
-IsPoolTypeToGuard (
- IN EFI_MEMORY_TYPE MemoryType
- )
-{
- return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
- GUARD_HEAP_TYPE_POOL);
-}
-
-/**
- Check to see if the page at the given address should be guarded or not.
-
- @param[in] MemoryType Page type to check.
- @param[in] AllocateType Allocation type to check.
-
- @return TRUE The given type of page should be guarded.
- @return FALSE The given type of page should not be guarded.
-**/
-BOOLEAN
-IsPageTypeToGuard (
- IN EFI_MEMORY_TYPE MemoryType,
- IN EFI_ALLOCATE_TYPE AllocateType
- )
-{
- return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
-}
-
-/**
- Set head Guard and tail Guard for the given memory range.
-
- @param[in] Memory Base address of memory to set guard for.
- @param[in] NumberOfPages Memory size in pages.
-
- @return VOID
-**/
-VOID
-SetGuardForMemory (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
- )
-{
- EFI_PHYSICAL_ADDRESS GuardPage;
-
- //
- // Set tail Guard
- //
- GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
- if (!IsGuardPage (GuardPage)) {
- SetGuardPage (GuardPage);
- }
-
- // Set head Guard
- GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
- if (!IsGuardPage (GuardPage)) {
- SetGuardPage (GuardPage);
- }
-
- //
- // Mark the memory range as Guarded
- //
- SetGuardedMemoryBits (Memory, NumberOfPages);
-}
-
-/**
- Unset head Guard and tail Guard for the given memory range.
-
- @param[in] Memory Base address of memory to unset guard for.
- @param[in] NumberOfPages Memory size in pages.
-
- @return VOID
-**/
-VOID
-UnsetGuardForMemory (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
- )
-{
- EFI_PHYSICAL_ADDRESS GuardPage;
-
- if (NumberOfPages == 0) {
- return;
- }
-
- //
- // Head Guard must be one page before, if any.
- //
- GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
- if (IsHeadGuard (GuardPage)) {
- if (!IsMemoryGuarded (GuardPage - EFI_PAGES_TO_SIZE (1))) {
- //
- // If the head Guard is not a tail Guard of adjacent memory block,
- // unset it.
- //
- UnsetGuardPage (GuardPage);
- }
- } else if (IsMemoryGuarded (GuardPage)) {
- //
- // Pages before memory to free are still in Guard. It's a partial free
- // case. Turn first page of memory block to free into a new Guard.
- //
- SetGuardPage (Memory);
- }
-
- //
- // Tail Guard must be the page after this memory block to free, if any.
- //
- GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
- if (IsTailGuard (GuardPage)) {
- if (!IsMemoryGuarded (GuardPage + EFI_PAGES_TO_SIZE (1))) {
- //
- // If the tail Guard is not a head Guard of adjacent memory block,
- // free it; otherwise, keep it.
- //
- UnsetGuardPage (GuardPage);
- }
- } else if (IsMemoryGuarded (GuardPage)) {
- //
- // Pages after memory to free are still in Guard. It's a partial free
- // case. We need to keep one page to be a head Guard.
- //
- SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
- }
-
- //
- // No matter what, we just clear the mark of the Guarded memory.
- //
- ClearGuardedMemoryBits(Memory, NumberOfPages);
-}
-
-/**
- Adjust address of free memory according to existing and/or required Guard.
-
- This function will check if there're existing Guard pages of adjacent
- memory blocks, and try to use it as the Guard page of the memory to be
- allocated.
-
- @param[in] Start Start address of free memory block.
- @param[in] Size Size of free memory block.
- @param[in] SizeRequested Size of memory to allocate.
-
- @return The end address of memory block found.
- @return 0 if no enough space for the required size of memory and its Guard.
-**/
-UINT64
-AdjustMemoryS (
- IN UINT64 Start,
- IN UINT64 Size,
- IN UINT64 SizeRequested
- )
-{
- UINT64 Target;
-
- Target = Start + Size - SizeRequested;
-
- //
- // At least one more page needed for Guard page.
- //
- if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
- return 0;
- }
-
- if (!IsGuardPage (Start + Size)) {
- // No Guard at tail to share. One more page is needed.
- Target -= EFI_PAGES_TO_SIZE (1);
- }
-
- // Out of range?
- if (Target < Start) {
- return 0;
- }
-
- // At the edge?
- if (Target == Start) {
- if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
- // No enough space for a new head Guard if no Guard at head to share.
- return 0;
- }
- }
-
- // OK, we have enough pages for memory and its Guards. Return the End of the
- // free space.
- return Target + SizeRequested - 1;
-}
-
-/**
- Adjust the start address and number of pages to free according to Guard.
-
- The purpose of this function is to keep the shared Guard page with adjacent
- memory block if it's still in guard, or free it if no more sharing. Another
- is to reserve pages as Guard pages in partial page free situation.
-
- @param[in,out] Memory Base address of memory to free.
- @param[in,out] NumberOfPages Size of memory to free.
-
- @return VOID.
-**/
-VOID
-AdjustMemoryF (
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN OUT UINTN *NumberOfPages
- )
-{
- EFI_PHYSICAL_ADDRESS Start;
- EFI_PHYSICAL_ADDRESS MemoryToTest;
- UINTN PagesToFree;
-
- if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
- return;
- }
-
- Start = *Memory;
- PagesToFree = *NumberOfPages;
-
- //
- // Head Guard must be one page before, if any.
- //
- MemoryToTest = Start - EFI_PAGES_TO_SIZE (1);
- if (IsHeadGuard (MemoryToTest)) {
- if (!IsMemoryGuarded (MemoryToTest - EFI_PAGES_TO_SIZE (1))) {
- //
- // If the head Guard is not a tail Guard of adjacent memory block,
- // free it; otherwise, keep it.
- //
- Start -= EFI_PAGES_TO_SIZE (1);
- PagesToFree += 1;
- }
- } else if (IsMemoryGuarded (MemoryToTest)) {
- //
- // Pages before memory to free are still in Guard. It's a partial free
- // case. We need to keep one page to be a tail Guard.
- //
- Start += EFI_PAGES_TO_SIZE (1);
- PagesToFree -= 1;
- }
-
- //
- // Tail Guard must be the page after this memory block to free, if any.
- //
- MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
- if (IsTailGuard (MemoryToTest)) {
- if (!IsMemoryGuarded (MemoryToTest + EFI_PAGES_TO_SIZE (1))) {
- //
- // If the tail Guard is not a head Guard of adjacent memory block,
- // free it; otherwise, keep it.
- //
- PagesToFree += 1;
- }
- } else if (IsMemoryGuarded (MemoryToTest)) {
- //
- // Pages after memory to free are still in Guard. It's a partial free
- // case. We need to keep one page to be a head Guard.
- //
- PagesToFree -= 1;
- }
-
- *Memory = Start;
- *NumberOfPages = PagesToFree;
-}
-
-/**
- Adjust the base and number of pages to really allocate according to Guard.
-
- @param[in,out] Memory Base address of free memory.
- @param[in,out] NumberOfPages Size of memory to allocate.
-
- @return VOID.
-**/
-VOID
-AdjustMemoryA (
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN OUT UINTN *NumberOfPages
- )
-{
- //
- // FindFreePages() has already taken the Guard into account. It's safe to
- // adjust the start address and/or number of pages here, to make sure that
- // the Guards are also "allocated".
- //
- if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
- // No tail Guard, add one.
- *NumberOfPages += 1;
- }
-
- if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
- // No head Guard, add one.
- *Memory -= EFI_PAGE_SIZE;
- *NumberOfPages += 1;
- }
-}
-
-/**
- Adjust the pool head position to make sure the Guard page is adjavent to
- pool tail or pool head.
-
- @param[in] Memory Base address of memory allocated.
- @param[in] NoPages Number of pages actually allocated.
- @param[in] Size Size of memory requested.
- (plus pool head/tail overhead)
-
- @return Address of pool head.
-**/
-VOID *
-AdjustPoolHeadA (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NoPages,
- IN UINTN Size
- )
-{
- if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
- //
- // Pool head is put near the head Guard
- //
- return (VOID *)(UINTN)Memory;
- }
-
- //
- // Pool head is put near the tail Guard
- //
- return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
-}
-
-/**
- Get the page base address according to pool head address.
-
- @param[in] Memory Head address of pool to free.
-
- @return Address of pool head.
-**/
-VOID *
-AdjustPoolHeadF (
- IN EFI_PHYSICAL_ADDRESS Memory
- )
-{
- if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
- //
- // Pool head is put near the head Guard
- //
- return (VOID *)(UINTN)Memory;
- }
-
- //
- // Pool head is put near the tail Guard
- //
- return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
-}
-
-/**
- Allocate or free guarded memory.
-
- @param[in] Start Start address of memory to allocate or free.
- @param[in] NumberOfPages Memory size in pages.
- @param[in] NewType Memory type to convert to.
-
- @return VOID.
-**/
-EFI_STATUS
-CoreConvertPagesWithGuard (
- IN UINT64 Start,
- IN UINTN NumberOfPages,
- IN EFI_MEMORY_TYPE NewType
- )
-{
- if (NewType == EfiConventionalMemory) {
- AdjustMemoryF (&Start, &NumberOfPages);
- } else {
- AdjustMemoryA (&Start, &NumberOfPages);
- }
-
- return CoreConvertPages(Start, NumberOfPages, NewType);
-}
-
-/**
- Helper function to convert a UINT64 value in binary to a string.
-
- @param[in] Value Value of a UINT64 integer.
- @param[out] BinString String buffer to contain the conversion result.
-
- @return VOID.
-**/
-VOID
-Uint64ToBinString (
- IN UINT64 Value,
- OUT CHAR8 *BinString
- )
-{
- UINTN Index;
-
- if (BinString == NULL) {
- return;
- }
-
- for (Index = 64; Index > 0; --Index) {
- BinString[Index - 1] = '0' + (Value & 1);
- Value = RShiftU64 (Value, 1);
- }
- BinString[64] = '\0';
-}
-
-/**
- Dump the guarded memory bit map.
-**/
-VOID
-EFIAPI
-DumpGuardedMemoryBitmap (
- VOID
- )
-{
- UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
- UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
- UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
- UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
- UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
- UINT64 TableEntry;
- UINT64 Address;
- INTN Level;
- UINTN RepeatZero;
- CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
- CHAR8 *Ruler1;
- CHAR8 *Ruler2;
-
- if (mGuardedMemoryMap == 0) {
- return;
- }
-
- Ruler1 = " 3 2 1 0";
- Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
-
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
- " Guarded Memory Bitmap "
- "==============================\r\n"));
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
-
- CopyMem (Entries, mLevelMask, sizeof (Entries));
- CopyMem (Shifts, mLevelShift, sizeof (Shifts));
-
- SetMem (Indices, sizeof(Indices), 0);
- SetMem (Tables, sizeof(Tables), 0);
- SetMem (Addresses, sizeof(Addresses), 0);
-
- Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
- Tables[Level] = mGuardedMemoryMap;
- Address = 0;
- RepeatZero = 0;
-
- while (TRUE) {
- if (Indices[Level] > Entries[Level]) {
-
- Tables[Level] = 0;
- Level -= 1;
- RepeatZero = 0;
-
- DEBUG ((
- HEAP_GUARD_DEBUG_LEVEL,
- "========================================="
- "=========================================\r\n"
- ));
-
- } else {
-
- TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
- Address = Addresses[Level];
-
- if (TableEntry == 0) {
-
- if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
- if (RepeatZero == 0) {
- Uint64ToBinString(TableEntry, String);
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
- } else if (RepeatZero == 1) {
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
- }
- RepeatZero += 1;
- }
-
- } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
-
- Level += 1;
- Tables[Level] = TableEntry;
- Addresses[Level] = Address;
- Indices[Level] = 0;
- RepeatZero = 0;
-
- continue;
-
- } else {
-
- RepeatZero = 0;
- Uint64ToBinString(TableEntry, String);
- DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
-
- }
- }
-
- if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
- break;
- }
-
- Indices[Level] += 1;
- Address = (Level == 0) ? 0 : Addresses[Level - 1];
- Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
-
- }
-}
-
+/** @file
+ UEFI Heap Guard functions.
+
+Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "DxeMain.h"
+#include "Imem.h"
+#include "HeapGuard.h"
+
+//
+// Global to avoid infinite reentrance of memory allocation when updating
+// page table attributes, which may need allocate pages for new PDE/PTE.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
+
+//
+// Pointer to table tracking the Guarded memory with bitmap, in which '1'
+// is used to indicate memory guarded. '0' might be free memory or Guard
+// page itself, depending on status of memory adjacent to it.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
+
+//
+// Current depth level of map table pointed by mGuardedMemoryMap.
+// mMapLevel must be initialized at least by 1. It will be automatically
+// updated according to the address of memory just tracked.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
+
+//
+// Shift and mask for each level of map table
+//
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
+ = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
+ = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
+
+/**
+ Set corresponding bits in bitmap table to 1 according to the address.
+
+ @param[in] Address Start address to set for.
+ @param[in] BitNumber Number of bits to set.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return VOID.
+**/
+STATIC
+VOID
+SetBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN Lsbs;
+ UINTN Qwords;
+ UINTN Msbs;
+ UINTN StartBit;
+ UINTN EndBit;
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
+ GUARDED_HEAP_MAP_ENTRY_BITS;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ Qwords = 0;
+ }
+
+ if (Msbs > 0) {
+ *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
+ BitMap += 1;
+ }
+
+ if (Qwords > 0) {
+ SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
+ (UINT64)-1);
+ BitMap += Qwords;
+ }
+
+ if (Lsbs > 0) {
+ *BitMap |= (LShiftU64 (1, Lsbs) - 1);
+ }
+}
+
+/**
+ Set corresponding bits in bitmap table to 0 according to the address.
+
+ @param[in] Address Start address to set for.
+ @param[in] BitNumber Number of bits to set.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return VOID.
+**/
+STATIC
+VOID
+ClearBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN Lsbs;
+ UINTN Qwords;
+ UINTN Msbs;
+ UINTN StartBit;
+ UINTN EndBit;
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
+ GUARDED_HEAP_MAP_ENTRY_BITS;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ Qwords = 0;
+ }
+
+ if (Msbs > 0) {
+ *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
+ BitMap += 1;
+ }
+
+ if (Qwords > 0) {
+ SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
+ BitMap += Qwords;
+ }
+
+ if (Lsbs > 0) {
+ *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
+ }
+}
+
+/**
+ Get corresponding bits in bitmap table according to the address.
+
+ The value of bit 0 corresponds to the status of memory at given Address.
+ No more than 64 bits can be retrieved in one call.
+
+ @param[in] Address Start address to retrieve bits for.
+ @param[in] BitNumber Number of bits to get.
+ @param[in] BitMap Pointer to bitmap which covers the Address.
+
+ @return An integer containing the bits information.
+**/
+STATIC
+UINT64
+GetBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN BitNumber,
+ IN UINT64 *BitMap
+ )
+{
+ UINTN StartBit;
+ UINTN EndBit;
+ UINTN Lsbs;
+ UINTN Msbs;
+ UINT64 Result;
+
+ ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
+
+ StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
+ EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+
+ if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
+ Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
+ Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
+ } else {
+ Msbs = BitNumber;
+ Lsbs = 0;
+ }
+
+ Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
+ if (Lsbs > 0) {
+ BitMap += 1;
+ Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
+ }
+
+ return Result;
+}
+
+/**
+ Locate the pointer of bitmap from the guarded memory bitmap tables, which
+ covers the given Address.
+
+ @param[in] Address Start address to search the bitmap for.
+ @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
+ @param[out] BitMap Pointer to bitmap which covers the Address.
+
+ @return The bit number from given Address to the end of current map table.
+**/
+UINTN
+FindGuardedMemoryMap (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN AllocMapUnit,
+ OUT UINT64 **BitMap
+ )
+{
+ UINTN Level;
+ UINT64 *GuardMap;
+ UINT64 MapMemory;
+ UINTN Index;
+ UINTN Size;
+ UINTN BitsToUnitEnd;
+ EFI_STATUS Status;
+
+ //
+ // Adjust current map table depth according to the address to access
+ //
+ while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
+ &&
+ RShiftU64 (
+ Address,
+ mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
+ ) != 0) {
+
+ if (mGuardedMemoryMap != 0) {
+ Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
+ * GUARDED_HEAP_MAP_ENTRY_BYTES;
+ Status = CoreInternalAllocatePages (
+ AllocateAnyPages,
+ EfiBootServicesData,
+ EFI_SIZE_TO_PAGES (Size),
+ &MapMemory,
+ FALSE
+ );
+ ASSERT_EFI_ERROR (Status);
+ ASSERT (MapMemory != 0);
+
+ SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
+
+ *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
+ mGuardedMemoryMap = MapMemory;
+ }
+
+ mMapLevel++;
+
+ }
+
+ GuardMap = &mGuardedMemoryMap;
+ for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+ Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
+ ++Level) {
+
+ if (*GuardMap == 0) {
+ if (!AllocMapUnit) {
+ GuardMap = NULL;
+ break;
+ }
+
+ Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
+ Status = CoreInternalAllocatePages (
+ AllocateAnyPages,
+ EfiBootServicesData,
+ EFI_SIZE_TO_PAGES (Size),
+ &MapMemory,
+ FALSE
+ );
+ ASSERT_EFI_ERROR (Status);
+ ASSERT (MapMemory != 0);
+
+ SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
+ *GuardMap = MapMemory;
+ }
+
+ Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
+ Index &= mLevelMask[Level];
+ GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
+
+ }
+
+ BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
+ *BitMap = GuardMap;
+
+ return BitsToUnitEnd;
+}
+
+/**
+ Set corresponding bits in bitmap table to 1 according to given memory range.
+
+ @param[in] Address Memory address to guard from.
+ @param[in] NumberOfPages Number of pages to guard.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+SetGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN BitsToUnitEnd;
+
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
+ ASSERT (BitMap != NULL);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ SetBits (Address, Bits, BitMap);
+
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+}
+
+/**
+ Clear corresponding bits in bitmap table according to given memory range.
+
+ @param[in] Address Memory address to unset from.
+ @param[in] NumberOfPages Number of pages to unset guard.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+ClearGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN BitsToUnitEnd;
+
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
+ ASSERT (BitMap != NULL);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ ClearBits (Address, Bits, BitMap);
+
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+}
+
+/**
+ Retrieve corresponding bits in bitmap table according to given memory range.
+
+ @param[in] Address Memory address to retrieve from.
+ @param[in] NumberOfPages Number of pages to retrieve.
+
+ @return An integer containing the guarded memory bitmap.
+**/
+UINTN
+GetGuardedMemoryBits (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN UINTN NumberOfPages
+ )
+{
+ UINT64 *BitMap;
+ UINTN Bits;
+ UINTN Result;
+ UINTN Shift;
+ UINTN BitsToUnitEnd;
+
+ ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
+
+ Result = 0;
+ Shift = 0;
+ while (NumberOfPages > 0) {
+ BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
+
+ if (NumberOfPages > BitsToUnitEnd) {
+ // Cross map unit
+ Bits = BitsToUnitEnd;
+ } else {
+ Bits = NumberOfPages;
+ }
+
+ if (BitMap != NULL) {
+ Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
+ }
+
+ Shift += Bits;
+ NumberOfPages -= Bits;
+ Address += EFI_PAGES_TO_SIZE (Bits);
+ }
+
+ return Result;
+}
+
+/**
+ Get bit value in bitmap table for the given address.
+
+ @param[in] Address The address to retrieve for.
+
+ @return 1 or 0.
+**/
+UINTN
+EFIAPI
+GetGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+
+ FindGuardedMemoryMap (Address, FALSE, &GuardMap);
+ if (GuardMap != NULL) {
+ if (RShiftU64 (*GuardMap,
+ GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ Set the bit in bitmap table for the given address.
+
+ @param[in] Address The address to set for.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+SetGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+ UINT64 BitMask;
+
+ FindGuardedMemoryMap (Address, TRUE, &GuardMap);
+ if (GuardMap != NULL) {
+ BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
+ *GuardMap |= BitMask;
+ }
+}
+
+/**
+ Clear the bit in bitmap table for the given address.
+
+ @param[in] Address The address to clear for.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+ClearGuardMapBit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINT64 *GuardMap;
+ UINT64 BitMask;
+
+ FindGuardedMemoryMap (Address, TRUE, &GuardMap);
+ if (GuardMap != NULL) {
+ BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
+ *GuardMap &= ~BitMask;
+ }
+}
+
+/**
+ Check to see if the page at the given address is a Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a Guard page.
+ @return FALSE The page at Address is not a Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsGuardPage (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINTN BitMap;
+
+ //
+ // There must be at least one guarded page before and/or after given
+ // address if it's a Guard page. The bitmap pattern should be one of
+ // 001, 100 and 101
+ //
+ BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
+ return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
+}
+
+/**
+ Check to see if the page at the given address is a head Guard page or not.
+
+ @param[in] Address The address to check for
+
+ @return TRUE The page at Address is a head Guard page
+ @return FALSE The page at Address is not a head Guard page
+**/
+BOOLEAN
+EFIAPI
+IsHeadGuard (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ return (GetGuardedMemoryBits (Address, 2) == BIT1);
+}
+
+/**
+ Check to see if the page at the given address is a tail Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a tail Guard page.
+ @return FALSE The page at Address is not a tail Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsTailGuard (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
+}
+
+/**
+ Check to see if the page at the given address is guarded or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is guarded.
+ @return FALSE The page at Address is not guarded.
+**/
+BOOLEAN
+EFIAPI
+IsMemoryGuarded (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ return (GetGuardMapBit (Address) == 1);
+}
+
+/**
+ Set the page at the given address to be a Guard page.
+
+ This is done by changing the page table attribute to be NOT PRSENT.
+
+ @param[in] BaseAddress Page address to Guard at
+
+ @return VOID
+**/
+VOID
+EFIAPI
+SetGuardPage (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress
+ )
+{
+ //
+ // Set flag to make sure allocating memory without GUARD for page table
+ // operation; otherwise infinite loops could be caused.
+ //
+ mOnGuarding = TRUE;
+ //
+ // Note: This might overwrite other attributes needed by other features,
+ // such as memory protection (NX). Please make sure they are not enabled
+ // at the same time.
+ //
+ gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
+ mOnGuarding = FALSE;
+}
+
+/**
+ Unset the Guard page at the given address to the normal memory.
+
+ This is done by changing the page table attribute to be PRSENT.
+
+ @param[in] BaseAddress Page address to Guard at.
+
+ @return VOID.
+**/
+VOID
+EFIAPI
+UnsetGuardPage (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress
+ )
+{
+ //
+ // Set flag to make sure allocating memory without GUARD for page table
+ // operation; otherwise infinite loops could be caused.
+ //
+ mOnGuarding = TRUE;
+ //
+ // Note: This might overwrite other attributes needed by other features,
+ // such as memory protection (NX). Please make sure they are not enabled
+ // at the same time.
+ //
+ gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, 0);
+ mOnGuarding = FALSE;
+}
+
+/**
+ Check to see if the memory at the given address should be guarded or not.
+
+ @param[in] MemoryType Memory type to check.
+ @param[in] AllocateType Allocation type to check.
+ @param[in] PageOrPool Indicate a page allocation or pool allocation.
+
+
+ @return TRUE The given type of memory should be guarded.
+ @return FALSE The given type of memory should not be guarded.
+**/
+BOOLEAN
+IsMemoryTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType,
+ IN UINT8 PageOrPool
+ )
+{
+ UINT64 TestBit;
+ UINT64 ConfigBit;
+ BOOLEAN InSmm;
+
+ if (gCpu == NULL || AllocateType == AllocateAddress) {
+ return FALSE;
+ }
+
+ InSmm = FALSE;
+ if (gSmmBase2 != NULL) {
+ gSmmBase2->InSmm (gSmmBase2, &InSmm);
+ }
+
+ if (InSmm) {
+ return FALSE;
+ }
+
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
+ return FALSE;
+ }
+
+ if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
+ ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
+ } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
+ ConfigBit = PcdGet64 (PcdHeapGuardPageType);
+ } else {
+ ConfigBit = (UINT64)-1;
+ }
+
+ if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
+ TestBit = BIT63;
+ } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
+ TestBit = BIT62;
+ } else if (MemoryType < EfiMaxMemoryType) {
+ TestBit = LShiftU64 (1, MemoryType);
+ } else if (MemoryType == EfiMaxMemoryType) {
+ TestBit = (UINT64)-1;
+ } else {
+ TestBit = 0;
+ }
+
+ return ((ConfigBit & TestBit) != 0);
+}
+
+/**
+ Check to see if the pool at the given address should be guarded or not.
+
+ @param[in] MemoryType Pool type to check.
+
+
+ @return TRUE The given type of pool should be guarded.
+ @return FALSE The given type of pool should not be guarded.
+**/
+BOOLEAN
+IsPoolTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType
+ )
+{
+ return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
+ GUARD_HEAP_TYPE_POOL);
+}
+
+/**
+ Check to see if the page at the given address should be guarded or not.
+
+ @param[in] MemoryType Page type to check.
+ @param[in] AllocateType Allocation type to check.
+
+ @return TRUE The given type of page should be guarded.
+ @return FALSE The given type of page should not be guarded.
+**/
+BOOLEAN
+IsPageTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType
+ )
+{
+ return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
+}
+
+/**
+ Set head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to set guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID
+**/
+VOID
+SetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS GuardPage;
+
+ //
+ // Set tail Guard
+ //
+ GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
+ if (!IsGuardPage (GuardPage)) {
+ SetGuardPage (GuardPage);
+ }
+
+ // Set head Guard
+ GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
+ if (!IsGuardPage (GuardPage)) {
+ SetGuardPage (GuardPage);
+ }
+
+ //
+ // Mark the memory range as Guarded
+ //
+ SetGuardedMemoryBits (Memory, NumberOfPages);
+}
+
+/**
+ Unset head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to unset guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID
+**/
+VOID
+UnsetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS GuardPage;
+
+ if (NumberOfPages == 0) {
+ return;
+ }
+
+ //
+ // Head Guard must be one page before, if any.
+ //
+ GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
+ if (IsHeadGuard (GuardPage)) {
+ if (!IsMemoryGuarded (GuardPage - EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the head Guard is not a tail Guard of adjacent memory block,
+ // unset it.
+ //
+ UnsetGuardPage (GuardPage);
+ }
+ } else if (IsMemoryGuarded (GuardPage)) {
+ //
+ // Pages before memory to free are still in Guard. It's a partial free
+ // case. Turn first page of memory block to free into a new Guard.
+ //
+ SetGuardPage (Memory);
+ }
+
+ //
+ // Tail Guard must be the page after this memory block to free, if any.
+ //
+ GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
+ if (IsTailGuard (GuardPage)) {
+ if (!IsMemoryGuarded (GuardPage + EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the tail Guard is not a head Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ UnsetGuardPage (GuardPage);
+ }
+ } else if (IsMemoryGuarded (GuardPage)) {
+ //
+ // Pages after memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a head Guard.
+ //
+ SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
+ }
+
+ //
+ // No matter what, we just clear the mark of the Guarded memory.
+ //
+ ClearGuardedMemoryBits(Memory, NumberOfPages);
+}
+
+/**
+ Adjust address of free memory according to existing and/or required Guard.
+
+ This function will check if there're existing Guard pages of adjacent
+ memory blocks, and try to use it as the Guard page of the memory to be
+ allocated.
+
+ @param[in] Start Start address of free memory block.
+ @param[in] Size Size of free memory block.
+ @param[in] SizeRequested Size of memory to allocate.
+
+ @return The end address of memory block found.
+ @return 0 if no enough space for the required size of memory and its Guard.
+**/
+UINT64
+AdjustMemoryS (
+ IN UINT64 Start,
+ IN UINT64 Size,
+ IN UINT64 SizeRequested
+ )
+{
+ UINT64 Target;
+
+ Target = Start + Size - SizeRequested;
+
+ //
+ // At least one more page needed for Guard page.
+ //
+ if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
+ return 0;
+ }
+
+ if (!IsGuardPage (Start + Size)) {
+ // No Guard at tail to share. One more page is needed.
+ Target -= EFI_PAGES_TO_SIZE (1);
+ }
+
+ // Out of range?
+ if (Target < Start) {
+ return 0;
+ }
+
+ // At the edge?
+ if (Target == Start) {
+ if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
+ // No enough space for a new head Guard if no Guard at head to share.
+ return 0;
+ }
+ }
+
+ // OK, we have enough pages for memory and its Guards. Return the End of the
+ // free space.
+ return Target + SizeRequested - 1;
+}
+
+/**
+ Adjust the start address and number of pages to free according to Guard.
+
+ The purpose of this function is to keep the shared Guard page with adjacent
+ memory block if it's still in guard, or free it if no more sharing. Another
+ is to reserve pages as Guard pages in partial page free situation.
+
+ @param[in,out] Memory Base address of memory to free.
+ @param[in,out] NumberOfPages Size of memory to free.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryF (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS Start;
+ EFI_PHYSICAL_ADDRESS MemoryToTest;
+ UINTN PagesToFree;
+
+ if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
+ return;
+ }
+
+ Start = *Memory;
+ PagesToFree = *NumberOfPages;
+
+ //
+ // Head Guard must be one page before, if any.
+ //
+ MemoryToTest = Start - EFI_PAGES_TO_SIZE (1);
+ if (IsHeadGuard (MemoryToTest)) {
+ if (!IsMemoryGuarded (MemoryToTest - EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the head Guard is not a tail Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ Start -= EFI_PAGES_TO_SIZE (1);
+ PagesToFree += 1;
+ }
+ } else if (IsMemoryGuarded (MemoryToTest)) {
+ //
+ // Pages before memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a tail Guard.
+ //
+ Start += EFI_PAGES_TO_SIZE (1);
+ PagesToFree -= 1;
+ }
+
+ //
+ // Tail Guard must be the page after this memory block to free, if any.
+ //
+ MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
+ if (IsTailGuard (MemoryToTest)) {
+ if (!IsMemoryGuarded (MemoryToTest + EFI_PAGES_TO_SIZE (1))) {
+ //
+ // If the tail Guard is not a head Guard of adjacent memory block,
+ // free it; otherwise, keep it.
+ //
+ PagesToFree += 1;
+ }
+ } else if (IsMemoryGuarded (MemoryToTest)) {
+ //
+ // Pages after memory to free are still in Guard. It's a partial free
+ // case. We need to keep one page to be a head Guard.
+ //
+ PagesToFree -= 1;
+ }
+
+ *Memory = Start;
+ *NumberOfPages = PagesToFree;
+}
+
+/**
+ Adjust the base and number of pages to really allocate according to Guard.
+
+ @param[in,out] Memory Base address of free memory.
+ @param[in,out] NumberOfPages Size of memory to allocate.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryA (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ )
+{
+ //
+ // FindFreePages() has already taken the Guard into account. It's safe to
+ // adjust the start address and/or number of pages here, to make sure that
+ // the Guards are also "allocated".
+ //
+ if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
+ // No tail Guard, add one.
+ *NumberOfPages += 1;
+ }
+
+ if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
+ // No head Guard, add one.
+ *Memory -= EFI_PAGE_SIZE;
+ *NumberOfPages += 1;
+ }
+}
+
+/**
+ Adjust the pool head position to make sure the Guard page is adjavent to
+ pool tail or pool head.
+
+ @param[in] Memory Base address of memory allocated.
+ @param[in] NoPages Number of pages actually allocated.
+ @param[in] Size Size of memory requested.
+ (plus pool head/tail overhead)
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadA (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NoPages,
+ IN UINTN Size
+ )
+{
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
+ //
+ // Pool head is put near the head Guard
+ //
+ return (VOID *)(UINTN)Memory;
+ }
+
+ //
+ // Pool head is put near the tail Guard
+ //
+ return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
+}
+
+/**
+ Get the page base address according to pool head address.
+
+ @param[in] Memory Head address of pool to free.
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadF (
+ IN EFI_PHYSICAL_ADDRESS Memory
+ )
+{
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
+ //
+ // Pool head is put near the head Guard
+ //
+ return (VOID *)(UINTN)Memory;
+ }
+
+ //
+ // Pool head is put near the tail Guard
+ //
+ return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
+}
+
+/**
+ Allocate or free guarded memory.
+
+ @param[in] Start Start address of memory to allocate or free.
+ @param[in] NumberOfPages Memory size in pages.
+ @param[in] NewType Memory type to convert to.
+
+ @return VOID.
+**/
+EFI_STATUS
+CoreConvertPagesWithGuard (
+ IN UINT64 Start,
+ IN UINTN NumberOfPages,
+ IN EFI_MEMORY_TYPE NewType
+ )
+{
+ if (NewType == EfiConventionalMemory) {
+ AdjustMemoryF (&Start, &NumberOfPages);
+ } else {
+ AdjustMemoryA (&Start, &NumberOfPages);
+ }
+
+ return CoreConvertPages(Start, NumberOfPages, NewType);
+}
+
+/**
+ Helper function to convert a UINT64 value in binary to a string.
+
+ @param[in] Value Value of a UINT64 integer.
+ @param[out] BinString String buffer to contain the conversion result.
+
+ @return VOID.
+**/
+VOID
+Uint64ToBinString (
+ IN UINT64 Value,
+ OUT CHAR8 *BinString
+ )
+{
+ UINTN Index;
+
+ if (BinString == NULL) {
+ return;
+ }
+
+ for (Index = 64; Index > 0; --Index) {
+ BinString[Index - 1] = '0' + (Value & 1);
+ Value = RShiftU64 (Value, 1);
+ }
+ BinString[64] = '\0';
+}
+
+/**
+ Dump the guarded memory bit map.
+**/
+VOID
+EFIAPI
+DumpGuardedMemoryBitmap (
+ VOID
+ )
+{
+ UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
+ UINT64 TableEntry;
+ UINT64 Address;
+ INTN Level;
+ UINTN RepeatZero;
+ CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
+ CHAR8 *Ruler1;
+ CHAR8 *Ruler2;
+
+ if (mGuardedMemoryMap == 0) {
+ return;
+ }
+
+ Ruler1 = " 3 2 1 0";
+ Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
+
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
+ " Guarded Memory Bitmap "
+ "==============================\r\n"));
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
+
+ CopyMem (Entries, mLevelMask, sizeof (Entries));
+ CopyMem (Shifts, mLevelShift, sizeof (Shifts));
+
+ SetMem (Indices, sizeof(Indices), 0);
+ SetMem (Tables, sizeof(Tables), 0);
+ SetMem (Addresses, sizeof(Addresses), 0);
+
+ Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
+ Tables[Level] = mGuardedMemoryMap;
+ Address = 0;
+ RepeatZero = 0;
+
+ while (TRUE) {
+ if (Indices[Level] > Entries[Level]) {
+
+ Tables[Level] = 0;
+ Level -= 1;
+ RepeatZero = 0;
+
+ DEBUG ((
+ HEAP_GUARD_DEBUG_LEVEL,
+ "========================================="
+ "=========================================\r\n"
+ ));
+
+ } else {
+
+ TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
+ Address = Addresses[Level];
+
+ if (TableEntry == 0) {
+
+ if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+ if (RepeatZero == 0) {
+ Uint64ToBinString(TableEntry, String);
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
+ } else if (RepeatZero == 1) {
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
+ }
+ RepeatZero += 1;
+ }
+
+ } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
+
+ Level += 1;
+ Tables[Level] = TableEntry;
+ Addresses[Level] = Address;
+ Indices[Level] = 0;
+ RepeatZero = 0;
+
+ continue;
+
+ } else {
+
+ RepeatZero = 0;
+ Uint64ToBinString(TableEntry, String);
+ DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
+
+ }
+ }
+
+ if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
+ break;
+ }
+
+ Indices[Level] += 1;
+ Address = (Level == 0) ? 0 : Addresses[Level - 1];
+ Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
+
+ }
+}
+
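The BIT7 handling above is easier to see with concrete numbers. The following standalone sketch (the helper name, base address, page count and size are hypothetical, not part of this patch) shows how AdjustPoolHeadA() and AdjustPoolHeadF() cooperate in the default mode, where BIT7 of PcdHeapGuardPropertyMask is clear and the pool head is pushed toward the tail Guard:

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

VOID
PoolHeadAdjustSketch (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  PageBase;
  UINTN                 NoPages;
  UINTN                 Size;
  VOID                  *Head;

  PageBase = 0x80000000;    // Hypothetical page base returned by the page allocator
  NoPages  = 2;             // Pages backing the guarded pool chunk
  Size     = 0x1830;        // Requested size plus pool head/tail overhead

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    //
    // Head = PageBase + EFI_PAGES_TO_SIZE (NoPages) - Size = 0x800007D0,
    // so the last byte of the chunk sits right below the tail Guard page.
    //
    Head = AdjustPoolHeadA (PageBase, NoPages, Size);
    ASSERT ((UINTN)Head == (UINTN)PageBase + EFI_PAGES_TO_SIZE (NoPages) - Size);

    //
    // Freeing only needs the page base back, which AdjustPoolHeadF ()
    // recovers by stripping the in-page offset.
    //
    ASSERT ((UINTN)AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)Head) == (UINTN)PageBase);
  }
}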
diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h
index 71757a83b1..bd7abd7c53 100644
--- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h
+++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.h
@@ -1,394 +1,394 @@
-/** @file
- Data type, macros and function prototypes of heap guard feature.
-
-Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
-This program and the accompanying materials
-are licensed and made available under the terms and conditions of the BSD License
-which accompanies this distribution. The full text of the license may be found at
-http://opensource.org/licenses/bsd-license.php
-
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-**/
-
-#ifndef _HEAPGUARD_H_
-#define _HEAPGUARD_H_
-
-//
-// Following macros are used to define and access the guarded memory bitmap
-// table.
-//
-// To simplify the access and reduce the memory used for this table, the
-// table is constructed in the similar way as page table structure but in
-// reverse direction, i.e. from bottom growing up to top.
-//
-// - 1-bit tracks 1 page (4KB)
-// - 1-UINT64 map entry tracks 256KB memory
-// - 1K-UINT64 map table tracks 256MB memory
-// - Five levels of tables can track any address of memory of 64-bit
-// system, like below.
-//
-// 512 * 512 * 512 * 512 * 1K * 64b * 4K
-// 111111111 111111111 111111111 111111111 1111111111 111111 111111111111
-// 63 54 45 36 27 17 11 0
-// 9b 9b 9b 9b 10b 6b 12b
-// L0 -> L1 -> L2 -> L3 -> L4 -> bits -> page
-// 1FF 1FF 1FF 1FF 3FF 3F FFF
-//
-// L4 table has 1K * sizeof(UINT64) = 8K (2-page), which can track 256MB
-// memory. Each table of L0-L3 will be allocated when its memory address
-// range is to be tracked. Only 1-page will be allocated each time. This
-// can save the memory used to establish this map table.
-//
-// For a normal configuration of system with 4G memory, two levels of tables
-// can track the whole memory, because two levels (L3+L4) of map tables have
-// already covered 37 bits of the memory address. And for a normal UEFI BIOS,
-// less than 128M memory would be consumed during boot. That means we just
-// need
-//
-// 1-page (L3) + 2-page (L4)
-//
-// memory (3 pages) to track the memory allocations. In this case,
-// there's no need to setup L0-L2 tables.
-//
-
-//
-// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
-// bits in address. (512 = 1 << 9)
-//
-#define BYTE_LENGTH_SHIFT 3 // (8 = 1 << 3)
-
-#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
- (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)
-
-#define GUARDED_HEAP_MAP_TABLE_DEPTH 5
-
-// Use UINT64_index + bit_index_of_UINT64 to locate the bit in map
-#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT 6 // (64 = 1 << 6)
-
-#define GUARDED_HEAP_MAP_ENTRY_BITS \
- (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)
-
-#define GUARDED_HEAP_MAP_ENTRY_BYTES \
- (GUARDED_HEAP_MAP_ENTRY_BITS / 8)
-
-// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
-#define GUARDED_HEAP_MAP_ENTRY_SHIFT \
- (GUARDED_HEAP_MAP_ENTRY_BITS \
- - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
- - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
- - EFI_PAGE_SHIFT)
-
-// L4 table address mask: (1 << 10 - 1) = 0x3FF
-#define GUARDED_HEAP_MAP_ENTRY_MASK \
- ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)
-
-// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
-#define GUARDED_HEAP_MAP_SIZE \
- ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)
-
-// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
-#define GUARDED_HEAP_MAP_UNIT_SIZE \
- (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)
-
-// L4 table entry number: 8KB / 8 = 1024
-#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
- (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)
-
-// L4 table entry indexing
-#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address) \
- (RShiftU64 (Address, EFI_PAGE_SHIFT \
- + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) \
- & GUARDED_HEAP_MAP_ENTRY_MASK)
-
-// L4 table entry bit indexing
-#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address) \
- (RShiftU64 (Address, EFI_PAGE_SHIFT) \
- & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))
-
-//
-// Total bits (pages) tracked by one L4 table (65536-bit)
-//
-#define GUARDED_HEAP_MAP_BITS \
- (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
- + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))
-
-//
-// Bit indexing inside the whole L4 table (0 - 65535)
-//
-#define GUARDED_HEAP_MAP_BIT_INDEX(Address) \
- (RShiftU64 (Address, EFI_PAGE_SHIFT) \
- & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
- + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))
-
-//
-// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
-//
-#define GUARDED_HEAP_MAP_TABLE_SHIFT \
- (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
- + EFI_PAGE_SHIFT)
-
-//
-// Macro used to initialize the local array variable for map table traversing
-// {55, 46, 37, 28, 18}
-//
-#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS \
- { \
- GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3, \
- GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2, \
- GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT, \
- GUARDED_HEAP_MAP_TABLE_SHIFT, \
- EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
- }
-
-//
-// Masks used to extract address range of each level of table
-// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
-//
-#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS \
- { \
- (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
- (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
- (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
- (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
- (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1 \
- }
-
-//
-// Memory type to guard (matching the related PCD definition)
-//
-#define GUARD_HEAP_TYPE_POOL BIT0
-#define GUARD_HEAP_TYPE_PAGE BIT1
-
-//
-// Debug message level
-//
-#define HEAP_GUARD_DEBUG_LEVEL (DEBUG_POOL|DEBUG_PAGE)
-
-typedef struct {
- UINT32 TailMark;
- UINT32 HeadMark;
- EFI_PHYSICAL_ADDRESS Address;
- LIST_ENTRY Link;
-} HEAP_GUARD_NODE;
-
-/**
- Internal function. Converts a memory range to the specified type.
- The range must exist in the memory map.
-
-  @param Start             The first address of the range. Must be page
- aligned.
- @param NumberOfPages The number of pages to convert.
- @param NewType The new type for the memory range.
-
- @retval EFI_INVALID_PARAMETER Invalid parameter.
-  @retval EFI_NOT_FOUND         Could not find a descriptor covering the specified
-                                range or conversion not allowed.
- @retval EFI_SUCCESS Successfully converts the memory range to the
- specified type.
-
-**/
-EFI_STATUS
-CoreConvertPages (
- IN UINT64 Start,
- IN UINT64 NumberOfPages,
- IN EFI_MEMORY_TYPE NewType
- );
-
-/**
- Allocate or free guarded memory.
-
- @param[in] Start Start address of memory to allocate or free.
- @param[in] NumberOfPages Memory size in pages.
- @param[in] NewType Memory type to convert to.
-
-  @return The status returned by CoreConvertPages().
-**/
-EFI_STATUS
-CoreConvertPagesWithGuard (
- IN UINT64 Start,
- IN UINTN NumberOfPages,
- IN EFI_MEMORY_TYPE NewType
- );
-
-/**
- Set head Guard and tail Guard for the given memory range.
-
- @param[in] Memory Base address of memory to set guard for.
- @param[in] NumberOfPages Memory size in pages.
-
- @return VOID.
-**/
-VOID
-SetGuardForMemory (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
- );
-
-/**
- Unset head Guard and tail Guard for the given memory range.
-
- @param[in] Memory Base address of memory to unset guard for.
- @param[in] NumberOfPages Memory size in pages.
-
- @return VOID.
-**/
-VOID
-UnsetGuardForMemory (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
- );
-
-/**
- Adjust the base and number of pages to really allocate according to Guard.
-
- @param[in,out] Memory Base address of free memory.
- @param[in,out] NumberOfPages Size of memory to allocate.
-
- @return VOID.
-**/
-VOID
-AdjustMemoryA (
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN OUT UINTN *NumberOfPages
- );
-
-/**
- Adjust the start address and number of pages to free according to Guard.
-
-  The purpose of this function is to keep the Guard page shared with the
-  adjacent memory block if that block is still guarded, or to free it if it
-  is no longer shared. It also reserves pages as Guard pages in a partial
-  free situation.
-
- @param[in,out] Memory Base address of memory to free.
- @param[in,out] NumberOfPages Size of memory to free.
-
- @return VOID.
-**/
-VOID
-AdjustMemoryF (
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN OUT UINTN *NumberOfPages
- );
-
-/**
- Adjust address of free memory according to existing and/or required Guard.
-
- This function will check if there're existing Guard pages of adjacent
- memory blocks, and try to use it as the Guard page of the memory to be
- allocated.
-
- @param[in] Start Start address of free memory block.
- @param[in] Size Size of free memory block.
- @param[in] SizeRequested Size of memory to allocate.
-
- @return The end address of memory block found.
-  @return 0 if there is not enough space for the required size of memory and its Guard.
-**/
-UINT64
-AdjustMemoryS (
- IN UINT64 Start,
- IN UINT64 Size,
- IN UINT64 SizeRequested
- );
-
-/**
- Check to see if the pool at the given address should be guarded or not.
-
- @param[in] MemoryType Pool type to check.
-
-
- @return TRUE The given type of pool should be guarded.
- @return FALSE The given type of pool should not be guarded.
-**/
-BOOLEAN
-IsPoolTypeToGuard (
- IN EFI_MEMORY_TYPE MemoryType
- );
-
-/**
- Check to see if the page at the given address should be guarded or not.
-
- @param[in] MemoryType Page type to check.
- @param[in] AllocateType Allocation type to check.
-
- @return TRUE The given type of page should be guarded.
- @return FALSE The given type of page should not be guarded.
-**/
-BOOLEAN
-IsPageTypeToGuard (
- IN EFI_MEMORY_TYPE MemoryType,
- IN EFI_ALLOCATE_TYPE AllocateType
- );
-
-/**
- Check to see if the page at the given address is guarded or not.
-
- @param[in] Address The address to check for.
-
- @return TRUE The page at Address is guarded.
- @return FALSE The page at Address is not guarded.
-**/
-BOOLEAN
-EFIAPI
-IsMemoryGuarded (
- IN EFI_PHYSICAL_ADDRESS Address
- );
-
-/**
- Check to see if the page at the given address is a Guard page or not.
-
- @param[in] Address The address to check for.
-
- @return TRUE The page at Address is a Guard page.
- @return FALSE The page at Address is not a Guard page.
-**/
-BOOLEAN
-EFIAPI
-IsGuardPage (
- IN EFI_PHYSICAL_ADDRESS Address
- );
-
-/**
- Dump the guarded memory bit map.
-**/
-VOID
-EFIAPI
-DumpGuardedMemoryBitmap (
- VOID
- );
-
-/**
-  Adjust the pool head position to make sure the Guard page is adjacent to
- pool tail or pool head.
-
- @param[in] Memory Base address of memory allocated.
- @param[in] NoPages Number of pages actually allocated.
- @param[in] Size Size of memory requested.
- (plus pool head/tail overhead)
-
- @return Address of pool head.
-**/
-VOID *
-AdjustPoolHeadA (
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NoPages,
- IN UINTN Size
- );
-
-/**
- Get the page base address according to pool head address.
-
- @param[in] Memory Head address of pool to free.
-
-  @return Base address of the pool pages.
-**/
-VOID *
-AdjustPoolHeadF (
- IN EFI_PHYSICAL_ADDRESS Memory
- );
-
-extern BOOLEAN mOnGuarding;
-
-#endif
+/** @file
+ Data type, macros and function prototypes of heap guard feature.
+
+Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _HEAPGUARD_H_
+#define _HEAPGUARD_H_
+
+//
+// Following macros are used to define and access the guarded memory bitmap
+// table.
+//
+// To simplify the access and reduce the memory used for this table, the
+// table is constructed in the similar way as page table structure but in
+// reverse direction, i.e. from bottom growing up to top.
+//
+// - 1-bit tracks 1 page (4KB)
+// - 1-UINT64 map entry tracks 256KB memory
+// - 1K-UINT64 map table tracks 256MB memory
+// - Five levels of tables can track any address of memory of 64-bit
+// system, like below.
+//
+// 512 * 512 * 512 * 512 * 1K * 64b * 4K
+// 111111111 111111111 111111111 111111111 1111111111 111111 111111111111
+// 63 54 45 36 27 17 11 0
+// 9b 9b 9b 9b 10b 6b 12b
+// L0 -> L1 -> L2 -> L3 -> L4 -> bits -> page
+// 1FF 1FF 1FF 1FF 3FF 3F FFF
+//
+// L4 table has 1K * sizeof(UINT64) = 8K (2-page), which can track 256MB
+// memory. Each table of L0-L3 will be allocated when its memory address
+// range is to be tracked. Only 1-page will be allocated each time. This
+// can save the memory used to establish this map table.
+//
+// For a normal configuration of system with 4G memory, two levels of tables
+// can track the whole memory, because two levels (L3+L4) of map tables have
+// already covered 37 bits of the memory address. And for a normal UEFI BIOS,
+// less than 128M memory would be consumed during boot. That means we just
+// need
+//
+// 1-page (L3) + 2-page (L4)
+//
+// memory (3 pages) to track the memory allocations. In this case,
+// there's no need to setup L0-L2 tables.
+//
+
+//
+// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
+// bits in address. (512 = 1 << 9)
+//
+#define BYTE_LENGTH_SHIFT 3 // (8 = 1 << 3)
+
+#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
+ (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)
+
+#define GUARDED_HEAP_MAP_TABLE_DEPTH 5
+
+// Use UINT64_index + bit_index_of_UINT64 to locate the bit in map
+#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT 6 // (64 = 1 << 6)
+
+#define GUARDED_HEAP_MAP_ENTRY_BITS \
+ (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)
+
+#define GUARDED_HEAP_MAP_ENTRY_BYTES \
+ (GUARDED_HEAP_MAP_ENTRY_BITS / 8)
+
+// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
+#define GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ (GUARDED_HEAP_MAP_ENTRY_BITS \
+ - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
+ - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ - EFI_PAGE_SHIFT)
+
+// L4 table address mask: (1 << 10 - 1) = 0x3FF
+#define GUARDED_HEAP_MAP_ENTRY_MASK \
+ ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)
+
+// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
+#define GUARDED_HEAP_MAP_SIZE \
+ ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)
+
+// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
+#define GUARDED_HEAP_MAP_UNIT_SIZE \
+ (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)
+
+// L4 table entry number: 8KB / 8 = 1024
+#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
+ (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)
+
+// L4 table entry indexing
+#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) \
+ & GUARDED_HEAP_MAP_ENTRY_MASK)
+
+// L4 table entry bit indexing
+#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT) \
+ & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))
+
+//
+// Total bits (pages) tracked by one L4 table (65536-bit)
+//
+#define GUARDED_HEAP_MAP_BITS \
+ (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))
+
+//
+// Bit indexing inside the whole L4 table (0 - 65535)
+//
+#define GUARDED_HEAP_MAP_BIT_INDEX(Address) \
+ (RShiftU64 (Address, EFI_PAGE_SHIFT) \
+ & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
+ + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))
+
+//
+// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
+//
+#define GUARDED_HEAP_MAP_TABLE_SHIFT \
+ (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ + EFI_PAGE_SHIFT)
+
+//
+// Macro used to initialize the local array variable for map table traversing
+// {55, 46, 37, 28, 18}
+//
+#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS \
+ { \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT, \
+ GUARDED_HEAP_MAP_TABLE_SHIFT, \
+ EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
+ }
+
+//
+// Masks used to extract address range of each level of table
+// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
+//
+#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS \
+ { \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
+ (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1 \
+ }
+
+//
+// Memory type to guard (matching the related PCD definition)
+//
+#define GUARD_HEAP_TYPE_POOL BIT0
+#define GUARD_HEAP_TYPE_PAGE BIT1
+
+//
+// Debug message level
+//
+#define HEAP_GUARD_DEBUG_LEVEL (DEBUG_POOL|DEBUG_PAGE)
+
+typedef struct {
+ UINT32 TailMark;
+ UINT32 HeadMark;
+ EFI_PHYSICAL_ADDRESS Address;
+ LIST_ENTRY Link;
+} HEAP_GUARD_NODE;
+
+/**
+ Internal function. Converts a memory range to the specified type.
+ The range must exist in the memory map.
+
+  @param Start             The first address of the range. Must be page
+ aligned.
+ @param NumberOfPages The number of pages to convert.
+ @param NewType The new type for the memory range.
+
+ @retval EFI_INVALID_PARAMETER Invalid parameter.
+  @retval EFI_NOT_FOUND         Could not find a descriptor covering the specified
+                                range or conversion not allowed.
+ @retval EFI_SUCCESS Successfully converts the memory range to the
+ specified type.
+
+**/
+EFI_STATUS
+CoreConvertPages (
+ IN UINT64 Start,
+ IN UINT64 NumberOfPages,
+ IN EFI_MEMORY_TYPE NewType
+ );
+
+/**
+ Allocate or free guarded memory.
+
+ @param[in] Start Start address of memory to allocate or free.
+ @param[in] NumberOfPages Memory size in pages.
+ @param[in] NewType Memory type to convert to.
+
+  @return The status returned by CoreConvertPages().
+**/
+EFI_STATUS
+CoreConvertPagesWithGuard (
+ IN UINT64 Start,
+ IN UINTN NumberOfPages,
+ IN EFI_MEMORY_TYPE NewType
+ );
+
+/**
+ Set head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to set guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+SetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ );
+
+/**
+ Unset head Guard and tail Guard for the given memory range.
+
+ @param[in] Memory Base address of memory to unset guard for.
+ @param[in] NumberOfPages Memory size in pages.
+
+ @return VOID.
+**/
+VOID
+UnsetGuardForMemory (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ );
+
+/**
+ Adjust the base and number of pages to really allocate according to Guard.
+
+ @param[in,out] Memory Base address of free memory.
+ @param[in,out] NumberOfPages Size of memory to allocate.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryA (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ );
+
+/**
+ Adjust the start address and number of pages to free according to Guard.
+
+  The purpose of this function is to keep the Guard page shared with the
+  adjacent memory block if that block is still guarded, or to free it if it
+  is no longer shared. It also reserves pages as Guard pages in a partial
+  free situation.
+
+ @param[in,out] Memory Base address of memory to free.
+ @param[in,out] NumberOfPages Size of memory to free.
+
+ @return VOID.
+**/
+VOID
+AdjustMemoryF (
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN OUT UINTN *NumberOfPages
+ );
+
+/**
+ Adjust address of free memory according to existing and/or required Guard.
+
+ This function will check if there're existing Guard pages of adjacent
+ memory blocks, and try to use it as the Guard page of the memory to be
+ allocated.
+
+ @param[in] Start Start address of free memory block.
+ @param[in] Size Size of free memory block.
+ @param[in] SizeRequested Size of memory to allocate.
+
+ @return The end address of memory block found.
+  @return 0 if there is not enough space for the required size of memory and its Guard.
+**/
+UINT64
+AdjustMemoryS (
+ IN UINT64 Start,
+ IN UINT64 Size,
+ IN UINT64 SizeRequested
+ );
+
+/**
+ Check to see if the pool at the given address should be guarded or not.
+
+ @param[in] MemoryType Pool type to check.
+
+
+ @return TRUE The given type of pool should be guarded.
+ @return FALSE The given type of pool should not be guarded.
+**/
+BOOLEAN
+IsPoolTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType
+ );
+
+/**
+ Check to see if the page at the given address should be guarded or not.
+
+ @param[in] MemoryType Page type to check.
+ @param[in] AllocateType Allocation type to check.
+
+ @return TRUE The given type of page should be guarded.
+ @return FALSE The given type of page should not be guarded.
+**/
+BOOLEAN
+IsPageTypeToGuard (
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN EFI_ALLOCATE_TYPE AllocateType
+ );
+
+/**
+ Check to see if the page at the given address is guarded or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is guarded.
+ @return FALSE The page at Address is not guarded.
+**/
+BOOLEAN
+EFIAPI
+IsMemoryGuarded (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Check to see if the page at the given address is a Guard page or not.
+
+ @param[in] Address The address to check for.
+
+ @return TRUE The page at Address is a Guard page.
+ @return FALSE The page at Address is not a Guard page.
+**/
+BOOLEAN
+EFIAPI
+IsGuardPage (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Dump the guarded memory bit map.
+**/
+VOID
+EFIAPI
+DumpGuardedMemoryBitmap (
+ VOID
+ );
+
+/**
+  Adjust the pool head position to make sure the Guard page is adjacent to
+ pool tail or pool head.
+
+ @param[in] Memory Base address of memory allocated.
+ @param[in] NoPages Number of pages actually allocated.
+ @param[in] Size Size of memory requested.
+ (plus pool head/tail overhead)
+
+ @return Address of pool head.
+**/
+VOID *
+AdjustPoolHeadA (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NoPages,
+ IN UINTN Size
+ );
+
+/**
+ Get the page base address according to pool head address.
+
+ @param[in] Memory Head address of pool to free.
+
+  @return Base address of the pool pages.
+**/
+VOID *
+AdjustPoolHeadF (
+ IN EFI_PHYSICAL_ADDRESS Memory
+ );
+
+extern BOOLEAN mOnGuarding;
+
+#endif
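To make the shift/mask tables above concrete, here is a standalone sketch (the function name and address value are arbitrary and not part of this patch) that decomposes one physical address into the per-level indices and the bit position used by the guarded-memory bitmap:

#include "DxeMain.h"
#include "HeapGuard.h"

VOID
BitmapIndexSketch (
  VOID
  )
{
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
  UINTN   Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Address;
  UINTN   Level;

  Address = 0xBFFFF000ULL;   // Last page below 3GB, picked for illustration

  //
  // Index at each level = (Address >> Shifts[Level]) & Masks[Level].
  // For 0xBFFFF000 this gives {0, 0, 0, 11, 1023}: L0-L2 are zero (their
  // tables are not even allocated on a 4GB system), L3 index 11 selects the
  // twelfth 256MB unit (2.75GB - 3GB), and 1023 is the last UINT64 entry of
  // that L4 table.
  //
  for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH; Level++) {
    Indices[Level] = (UINTN)RShiftU64 (Address, Shifts[Level]) & Masks[Level];
  }
  ASSERT (Indices[3] == 11 && Indices[4] == 1023);

  //
  // Within that UINT64 entry, bit 63 tracks the page at 0xBFFFF000:
  // (0xBFFFF000 >> 12) & 0x3F == 63.
  //
  ASSERT (GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address) == 63);
}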
diff --git a/MdeModulePkg/Core/Dxe/Mem/Imem.h b/MdeModulePkg/Core/Dxe/Mem/Imem.h
index 8a6ae50b88..e58a5d62ba 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Imem.h
+++ b/MdeModulePkg/Core/Dxe/Mem/Imem.h
@@ -1,7 +1,7 @@
/** @file
Data structure and functions to allocate and free memory space.
-Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -61,7 +61,7 @@ typedef struct {
@param PoolType The type of memory for the new pool pages
@param NumberOfPages No of pages to allocate
@param Alignment Bits to align.
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The allocated memory, or NULL
@@ -70,8 +70,8 @@ VOID *
CoreAllocatePoolPages (
IN EFI_MEMORY_TYPE PoolType,
IN UINTN NumberOfPages,
- IN UINTN Alignment,
- IN BOOLEAN NeedGuard
+ IN UINTN Alignment,
+ IN BOOLEAN NeedGuard
);
@@ -97,7 +97,7 @@ CoreFreePoolPages (
@param PoolType Type of pool to allocate
@param Size The amount of pool to allocate
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The allocate pool, or NULL
@@ -105,8 +105,8 @@ CoreFreePoolPages (
VOID *
CoreAllocatePoolI (
IN EFI_MEMORY_TYPE PoolType,
- IN UINTN Size,
- IN BOOLEAN NeedGuard
+ IN UINTN Size,
+ IN BOOLEAN NeedGuard
);
@@ -149,34 +149,34 @@ CoreReleaseMemoryLock (
VOID
);
-/**
- Allocates pages from the memory map.
-
- @param Type The type of allocation to perform
- @param MemoryType The type of memory to turn the allocated pages
- into
- @param NumberOfPages The number of pages to allocate
- @param Memory A pointer to receive the base allocated memory
- address
- @param NeedGuard Flag to indicate Guard page is needed or not
-
- @return Status. On success, Memory is filled in with the base address allocated
- @retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in
- spec.
-  @retval EFI_NOT_FOUND         Could not allocate pages matching the requirement.
-  @retval EFI_OUT_OF_RESOURCES  Not enough pages to allocate.
- @retval EFI_SUCCESS Pages successfully allocated.
-
-**/
-EFI_STATUS
-EFIAPI
-CoreInternalAllocatePages (
- IN EFI_ALLOCATE_TYPE Type,
- IN EFI_MEMORY_TYPE MemoryType,
- IN UINTN NumberOfPages,
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN BOOLEAN NeedGuard
- );
+/**
+ Allocates pages from the memory map.
+
+ @param Type The type of allocation to perform
+ @param MemoryType The type of memory to turn the allocated pages
+ into
+ @param NumberOfPages The number of pages to allocate
+ @param Memory A pointer to receive the base allocated memory
+ address
+ @param NeedGuard Flag to indicate Guard page is needed or not
+
+ @return Status. On success, Memory is filled in with the base address allocated
+ @retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in
+ spec.
+  @retval EFI_NOT_FOUND         Could not allocate pages matching the requirement.
+  @retval EFI_OUT_OF_RESOURCES  Not enough pages to allocate.
+ @retval EFI_SUCCESS Pages successfully allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+CoreInternalAllocatePages (
+ IN EFI_ALLOCATE_TYPE Type,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN NumberOfPages,
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN NeedGuard
+ );
//
// Internal Global data
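The new NeedGuard parameter threads through the internal allocators declared here. A minimal caller sketch (hypothetical helper, not part of this patch; the page count is arbitrary) of what a guarded page allocation looks like from inside the DXE core:

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

VOID
GuardedPageAllocationSketch (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  //
  // With NeedGuard == TRUE the core picks a free range that leaves room for
  // the two Guard pages, converts them together with the requested range,
  // and calls SetGuardForMemory () before returning.  The caller still
  // receives exactly 4 usable pages at Memory; the pages at
  // Memory - EFI_PAGE_SIZE and Memory + EFI_PAGES_TO_SIZE (4) are the Guards
  // and are not accessible.
  //
  Memory = 0;
  Status = CoreInternalAllocatePages (
             AllocateAnyPages,
             EfiBootServicesData,
             4,
             &Memory,
             TRUE               // NeedGuard
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Freeing goes through CoreFreePages (), which detects the Guard via
  // IsMemoryGuarded () and unsets it again (see Page.c below).
  //
}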
diff --git a/MdeModulePkg/Core/Dxe/Mem/Page.c b/MdeModulePkg/Core/Dxe/Mem/Page.c
index f1e4a37f2a..2034b64cd7 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Page.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Page.c
@@ -14,7 +14,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "DxeMain.h"
#include "Imem.h"
-#include "HeapGuard.h"
+#include "HeapGuard.h"
//
// Entry for tracking the memory regions for each memory type to coalesce similar memory types
@@ -288,12 +288,12 @@ AllocateMemoryMapEntry (
//
      // The list is empty; allocate one page to refuel the list
//
- FreeDescriptorEntries = CoreAllocatePoolPages (
- EfiBootServicesData,
+ FreeDescriptorEntries = CoreAllocatePoolPages (
+ EfiBootServicesData,
EFI_SIZE_TO_PAGES (DEFAULT_PAGE_ALLOCATION_GRANULARITY),
- DEFAULT_PAGE_ALLOCATION_GRANULARITY,
- FALSE
- );
+ DEFAULT_PAGE_ALLOCATION_GRANULARITY,
+ FALSE
+ );
if (FreeDescriptorEntries != NULL) {
//
      // Enqueue the free memory map entries into the list
@@ -901,40 +901,40 @@ CoreConvertPagesEx (
CoreAddRange (MemType, Start, RangeEnd, Attribute);
if (ChangingType && (MemType == EfiConventionalMemory)) {
if (Start == 0) {
- //
- // Avoid calling DEBUG_CLEAR_MEMORY() for an address of 0 because this
- // macro will ASSERT() if address is 0. Instead, CoreAddRange()
- // guarantees that the page starting at address 0 is always filled
- // with zeros.
- //
+ //
+ // Avoid calling DEBUG_CLEAR_MEMORY() for an address of 0 because this
+ // macro will ASSERT() if address is 0. Instead, CoreAddRange()
+ // guarantees that the page starting at address 0 is always filled
+ // with zeros.
+ //
if (RangeEnd > EFI_PAGE_SIZE) {
DEBUG_CLEAR_MEMORY ((VOID *)(UINTN) EFI_PAGE_SIZE, (UINTN) (RangeEnd - EFI_PAGE_SIZE + 1));
}
} else {
- //
- // If Heap Guard is enabled, the page at the top and/or bottom of
- // this memory block to free might be inaccessible. Skipping them
- // to avoid page fault exception.
- //
- UINT64 StartToClear;
- UINT64 EndToClear;
-
- StartToClear = Start;
- EndToClear = RangeEnd;
- if (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) {
- if (IsGuardPage(StartToClear)) {
- StartToClear += EFI_PAGE_SIZE;
- }
- if (IsGuardPage (EndToClear)) {
- EndToClear -= EFI_PAGE_SIZE;
- }
- ASSERT (EndToClear > StartToClear);
- }
-
- DEBUG_CLEAR_MEMORY(
- (VOID *)(UINTN)StartToClear,
- (UINTN)(EndToClear - StartToClear + 1)
- );
+ //
+ // If Heap Guard is enabled, the page at the top and/or bottom of
+ // this memory block to free might be inaccessible. Skipping them
+ // to avoid page fault exception.
+ //
+ UINT64 StartToClear;
+ UINT64 EndToClear;
+
+ StartToClear = Start;
+ EndToClear = RangeEnd;
+ if (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) {
+ if (IsGuardPage(StartToClear)) {
+ StartToClear += EFI_PAGE_SIZE;
+ }
+ if (IsGuardPage (EndToClear)) {
+ EndToClear -= EFI_PAGE_SIZE;
+ }
+ ASSERT (EndToClear > StartToClear);
+ }
+
+ DEBUG_CLEAR_MEMORY(
+ (VOID *)(UINTN)StartToClear,
+ (UINTN)(EndToClear - StartToClear + 1)
+ );
}
}
@@ -1021,7 +1021,7 @@ CoreUpdateMemoryAttributes (
@param NewType The type of memory the range is going to be
turned into
@param Alignment Bits to align with
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The base address of the range, or 0 if the range was not found
@@ -1032,8 +1032,8 @@ CoreFindFreePagesI (
IN UINT64 MinAddress,
IN UINT64 NumberOfPages,
IN EFI_MEMORY_TYPE NewType,
- IN UINTN Alignment,
- IN BOOLEAN NeedGuard
+ IN UINTN Alignment,
+ IN BOOLEAN NeedGuard
)
{
UINT64 NumberOfBytes;
@@ -1125,17 +1125,17 @@ CoreFindFreePagesI (
// If this is the best match so far remember it
//
if (DescEnd > Target) {
- if (NeedGuard) {
- DescEnd = AdjustMemoryS (
- DescEnd + 1 - DescNumberOfBytes,
- DescNumberOfBytes,
- NumberOfBytes
- );
- if (DescEnd == 0) {
- continue;
- }
- }
-
+ if (NeedGuard) {
+ DescEnd = AdjustMemoryS (
+ DescEnd + 1 - DescNumberOfBytes,
+ DescNumberOfBytes,
+ NumberOfBytes
+ );
+ if (DescEnd == 0) {
+ continue;
+ }
+ }
+
Target = DescEnd;
}
}
@@ -1166,7 +1166,7 @@ CoreFindFreePagesI (
@param NewType The type of memory the range is going to be
turned into
@param Alignment Bits to align with
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The base address of the range, or 0 if the range was not found.
@@ -1176,8 +1176,8 @@ FindFreePages (
IN UINT64 MaxAddress,
IN UINT64 NoPages,
IN EFI_MEMORY_TYPE NewType,
- IN UINTN Alignment,
- IN BOOLEAN NeedGuard
+ IN UINTN Alignment,
+ IN BOOLEAN NeedGuard
)
{
UINT64 Start;
@@ -1191,8 +1191,8 @@ FindFreePages (
mMemoryTypeStatistics[NewType].BaseAddress,
NoPages,
NewType,
- Alignment,
- NeedGuard
+ Alignment,
+ NeedGuard
);
if (Start != 0) {
return Start;
@@ -1203,8 +1203,8 @@ FindFreePages (
// Attempt to find free pages in the default allocation bin
//
if (MaxAddress >= mDefaultMaximumAddress) {
- Start = CoreFindFreePagesI (mDefaultMaximumAddress, 0, NoPages, NewType,
- Alignment, NeedGuard);
+ Start = CoreFindFreePagesI (mDefaultMaximumAddress, 0, NoPages, NewType,
+ Alignment, NeedGuard);
if (Start != 0) {
if (Start < mDefaultBaseAddress) {
mDefaultBaseAddress = Start;
@@ -1219,8 +1219,8 @@ FindFreePages (
// address range. If this allocation fails, then there are not enough
// resources anywhere to satisfy the request.
//
- Start = CoreFindFreePagesI (MaxAddress, 0, NoPages, NewType, Alignment,
- NeedGuard);
+ Start = CoreFindFreePagesI (MaxAddress, 0, NoPages, NewType, Alignment,
+ NeedGuard);
if (Start != 0) {
return Start;
}
@@ -1235,7 +1235,7 @@ FindFreePages (
//
// If any memory resources were promoted, then re-attempt the allocation
//
- return FindFreePages (MaxAddress, NoPages, NewType, Alignment, NeedGuard);
+ return FindFreePages (MaxAddress, NoPages, NewType, Alignment, NeedGuard);
}
@@ -1248,7 +1248,7 @@ FindFreePages (
@param NumberOfPages The number of pages to allocate
@param Memory A pointer to receive the base allocated memory
address
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return Status. On success, Memory is filled in with the base address allocated
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in
@@ -1264,8 +1264,8 @@ CoreInternalAllocatePages (
IN EFI_ALLOCATE_TYPE Type,
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
- IN OUT EFI_PHYSICAL_ADDRESS *Memory,
- IN BOOLEAN NeedGuard
+ IN OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN NeedGuard
)
{
EFI_STATUS Status;
@@ -1351,8 +1351,8 @@ CoreInternalAllocatePages (
// If not a specific address, then find an address to allocate
//
if (Type != AllocateAddress) {
- Start = FindFreePages (MaxAddress, NumberOfPages, MemoryType, Alignment,
- NeedGuard);
+ Start = FindFreePages (MaxAddress, NumberOfPages, MemoryType, Alignment,
+ NeedGuard);
if (Start == 0) {
Status = EFI_OUT_OF_RESOURCES;
goto Done;
@@ -1362,19 +1362,19 @@ CoreInternalAllocatePages (
//
// Convert pages from FreeMemory to the requested type
//
- if (NeedGuard) {
- Status = CoreConvertPagesWithGuard(Start, NumberOfPages, MemoryType);
- } else {
- Status = CoreConvertPages(Start, NumberOfPages, MemoryType);
- }
+ if (NeedGuard) {
+ Status = CoreConvertPagesWithGuard(Start, NumberOfPages, MemoryType);
+ } else {
+ Status = CoreConvertPages(Start, NumberOfPages, MemoryType);
+ }
Done:
CoreReleaseMemoryLock ();
if (!EFI_ERROR (Status)) {
- if (NeedGuard) {
- SetGuardForMemory (Start, NumberOfPages);
- }
+ if (NeedGuard) {
+ SetGuardForMemory (Start, NumberOfPages);
+ }
*Memory = Start;
}
@@ -1409,11 +1409,11 @@ CoreAllocatePages (
)
{
EFI_STATUS Status;
- BOOLEAN NeedGuard;
+ BOOLEAN NeedGuard;
- NeedGuard = IsPageTypeToGuard (MemoryType, Type) && !mOnGuarding;
- Status = CoreInternalAllocatePages (Type, MemoryType, NumberOfPages, Memory,
- NeedGuard);
+ NeedGuard = IsPageTypeToGuard (MemoryType, Type) && !mOnGuarding;
+ Status = CoreInternalAllocatePages (Type, MemoryType, NumberOfPages, Memory,
+ NeedGuard);
if (!EFI_ERROR (Status)) {
CoreUpdateProfile (
(EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0),
@@ -1454,7 +1454,7 @@ CoreInternalFreePages (
LIST_ENTRY *Link;
MEMORY_MAP *Entry;
UINTN Alignment;
- BOOLEAN IsGuarded;
+ BOOLEAN IsGuarded;
//
// Free the range
@@ -1464,7 +1464,7 @@ CoreInternalFreePages (
//
// Find the entry that the covers the range
//
- IsGuarded = FALSE;
+ IsGuarded = FALSE;
Entry = NULL;
for (Link = gMemoryMap.ForwardLink; Link != &gMemoryMap; Link = Link->ForwardLink) {
Entry = CR(Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
@@ -1501,20 +1501,20 @@ CoreInternalFreePages (
*MemoryType = Entry->Type;
}
- IsGuarded = IsPageTypeToGuard (Entry->Type, AllocateAnyPages) &&
- IsMemoryGuarded (Memory);
- if (IsGuarded) {
- Status = CoreConvertPagesWithGuard (Memory, NumberOfPages,
- EfiConventionalMemory);
- } else {
- Status = CoreConvertPages (Memory, NumberOfPages, EfiConventionalMemory);
+ IsGuarded = IsPageTypeToGuard (Entry->Type, AllocateAnyPages) &&
+ IsMemoryGuarded (Memory);
+ if (IsGuarded) {
+ Status = CoreConvertPagesWithGuard (Memory, NumberOfPages,
+ EfiConventionalMemory);
+ } else {
+ Status = CoreConvertPages (Memory, NumberOfPages, EfiConventionalMemory);
}
Done:
CoreReleaseMemoryLock ();
- if (IsGuarded) {
- UnsetGuardForMemory(Memory, NumberOfPages);
- }
+ if (IsGuarded) {
+ UnsetGuardForMemory(Memory, NumberOfPages);
+ }
return Status;
}
@@ -1912,12 +1912,12 @@ Done:
*MemoryMapSize = BufferSize;
- DEBUG_CODE (
- if (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) {
- DumpGuardedMemoryBitmap ();
- }
- );
-
+ DEBUG_CODE (
+ if (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) {
+ DumpGuardedMemoryBitmap ();
+ }
+ );
+
return Status;
}
@@ -1929,7 +1929,7 @@ Done:
@param PoolType The type of memory for the new pool pages
@param NumberOfPages No of pages to allocate
@param Alignment Bits to align.
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The allocated memory, or NULL
@@ -1938,8 +1938,8 @@ VOID *
CoreAllocatePoolPages (
IN EFI_MEMORY_TYPE PoolType,
IN UINTN NumberOfPages,
- IN UINTN Alignment,
- IN BOOLEAN NeedGuard
+ IN UINTN Alignment,
+ IN BOOLEAN NeedGuard
)
{
UINT64 Start;
@@ -1947,8 +1947,8 @@ CoreAllocatePoolPages (
//
// Find the pages to convert
//
- Start = FindFreePages (MAX_ADDRESS, NumberOfPages, PoolType, Alignment,
- NeedGuard);
+ Start = FindFreePages (MAX_ADDRESS, NumberOfPages, PoolType, Alignment,
+ NeedGuard);
//
// Convert it to boot services data
@@ -1956,11 +1956,11 @@ CoreAllocatePoolPages (
if (Start == 0) {
DEBUG ((DEBUG_ERROR | DEBUG_PAGE, "AllocatePoolPages: failed to allocate %d pages\n", (UINT32)NumberOfPages));
} else {
- if (NeedGuard) {
- CoreConvertPagesWithGuard (Start, NumberOfPages, PoolType);
- } else {
- CoreConvertPages (Start, NumberOfPages, PoolType);
- }
+ if (NeedGuard) {
+ CoreConvertPagesWithGuard (Start, NumberOfPages, PoolType);
+ } else {
+ CoreConvertPages (Start, NumberOfPages, PoolType);
+ }
}
return (VOID *)(UINTN) Start;
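The Guard-aware clearing added to CoreConvertPagesEx() above is the change most easily mis-read, so here is the same clamping as a standalone sketch with concrete numbers (hypothetical helper and made-up addresses, not part of this patch): freeing [0x80000000, 0x80004FFF] whose first and last pages are Guard pages must only scrub [0x80001000, 0x80003FFF].

#include "DxeMain.h"
#include "HeapGuard.h"

STATIC
VOID
ClearFreedRangeSkippingGuards (
  IN UINT64  Start,      // e.g. 0x80000000, a Guard page
  IN UINT64  RangeEnd    // e.g. 0x80004FFF, last byte of another Guard page
  )
{
  UINT64  StartToClear;
  UINT64  EndToClear;

  StartToClear = Start;
  EndToClear   = RangeEnd;

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) != 0) {
    if (IsGuardPage (StartToClear)) {
      StartToClear += EFI_PAGE_SIZE;   // 0x80000000 -> 0x80001000
    }
    if (IsGuardPage (EndToClear)) {
      EndToClear -= EFI_PAGE_SIZE;     // 0x80004FFF -> 0x80003FFF
    }
    ASSERT (EndToClear > StartToClear);
  }

  //
  // Scrubs 0x3000 bytes instead of 0x5000, never touching the Guard pages.
  //
  DEBUG_CLEAR_MEMORY ((VOID *)(UINTN)StartToClear, (UINTN)(EndToClear - StartToClear + 1));
}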
diff --git a/MdeModulePkg/Core/Dxe/Mem/Pool.c b/MdeModulePkg/Core/Dxe/Mem/Pool.c
index 77ca5671dc..b82b51595c 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Pool.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Pool.c
@@ -14,7 +14,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "DxeMain.h"
#include "Imem.h"
-#include "HeapGuard.h"
+#include "HeapGuard.h"
STATIC EFI_LOCK mPoolMemoryLock = EFI_INITIALIZE_LOCK_VARIABLE (TPL_NOTIFY);
@@ -170,7 +170,7 @@ LookupPoolHead (
}
}
- Pool = CoreAllocatePoolI (EfiBootServicesData, sizeof (POOL), FALSE);
+ Pool = CoreAllocatePoolI (EfiBootServicesData, sizeof (POOL), FALSE);
if (Pool == NULL) {
return NULL;
}
@@ -215,8 +215,8 @@ CoreInternalAllocatePool (
OUT VOID **Buffer
)
{
- EFI_STATUS Status;
- BOOLEAN NeedGuard;
+ EFI_STATUS Status;
+ BOOLEAN NeedGuard;
//
// If it's not a valid type, fail it
@@ -240,8 +240,8 @@ CoreInternalAllocatePool (
return EFI_OUT_OF_RESOURCES;
}
- NeedGuard = IsPoolTypeToGuard (PoolType) && !mOnGuarding;
-
+ NeedGuard = IsPoolTypeToGuard (PoolType) && !mOnGuarding;
+
//
// Acquire the memory lock and make the allocation
//
@@ -250,7 +250,7 @@ CoreInternalAllocatePool (
return EFI_OUT_OF_RESOURCES;
}
- *Buffer = CoreAllocatePoolI (PoolType, Size, NeedGuard);
+ *Buffer = CoreAllocatePoolI (PoolType, Size, NeedGuard);
CoreReleaseLock (&mPoolMemoryLock);
return (*Buffer != NULL) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
}
@@ -302,7 +302,7 @@ CoreAllocatePool (
@param PoolType The type of memory for the new pool pages
@param NoPages No of pages to allocate
@param Granularity Bits to align.
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The allocated memory, or NULL
@@ -312,8 +312,8 @@ VOID *
CoreAllocatePoolPagesI (
IN EFI_MEMORY_TYPE PoolType,
IN UINTN NoPages,
- IN UINTN Granularity,
- IN BOOLEAN NeedGuard
+ IN UINTN Granularity,
+ IN BOOLEAN NeedGuard
)
{
VOID *Buffer;
@@ -324,14 +324,14 @@ CoreAllocatePoolPagesI (
return NULL;
}
- Buffer = CoreAllocatePoolPages (PoolType, NoPages, Granularity, NeedGuard);
+ Buffer = CoreAllocatePoolPages (PoolType, NoPages, Granularity, NeedGuard);
CoreReleaseMemoryLock ();
if (Buffer != NULL) {
- if (NeedGuard) {
- SetGuardForMemory ((EFI_PHYSICAL_ADDRESS)(UINTN)Buffer, NoPages);
- }
- ApplyMemoryProtectionPolicy(EfiConventionalMemory, PoolType,
+ if (NeedGuard) {
+ SetGuardForMemory ((EFI_PHYSICAL_ADDRESS)(UINTN)Buffer, NoPages);
+ }
+ ApplyMemoryProtectionPolicy(EfiConventionalMemory, PoolType,
(EFI_PHYSICAL_ADDRESS)(UINTN)Buffer, EFI_PAGES_TO_SIZE (NoPages));
}
return Buffer;
@@ -343,7 +343,7 @@ CoreAllocatePoolPagesI (
@param PoolType Type of pool to allocate
@param Size The amount of pool to allocate
- @param NeedGuard Flag to indicate Guard page is needed or not
+ @param NeedGuard Flag to indicate Guard page is needed or not
@return The allocate pool, or NULL
@@ -351,8 +351,8 @@ CoreAllocatePoolPagesI (
VOID *
CoreAllocatePoolI (
IN EFI_MEMORY_TYPE PoolType,
- IN UINTN Size,
- IN BOOLEAN NeedGuard
+ IN UINTN Size,
+ IN BOOLEAN NeedGuard
)
{
POOL *Pool;
@@ -366,7 +366,7 @@ CoreAllocatePoolI (
UINTN Offset, MaxOffset;
UINTN NoPages;
UINTN Granularity;
- BOOLEAN HasPoolTail;
+ BOOLEAN HasPoolTail;
ASSERT_LOCKED (&mPoolMemoryLock);
@@ -384,9 +384,9 @@ CoreAllocatePoolI (
// Adjust the size by the pool header & tail overhead
//
- HasPoolTail = !(NeedGuard &&
- ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
-
+ HasPoolTail = !(NeedGuard &&
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
+
//
// Adjusting the Size to be of proper alignment so that
// we don't get an unaligned access fault later when
@@ -406,16 +406,16 @@ CoreAllocatePoolI (
// If allocation is over max size, just allocate pages for the request
// (slow)
//
- if (Index >= SIZE_TO_LIST (Granularity) || NeedGuard) {
- if (!HasPoolTail) {
- Size -= sizeof (POOL_TAIL);
- }
- NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
+ if (Index >= SIZE_TO_LIST (Granularity) || NeedGuard) {
+ if (!HasPoolTail) {
+ Size -= sizeof (POOL_TAIL);
+ }
+ NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
- Head = CoreAllocatePoolPagesI (PoolType, NoPages, Granularity, NeedGuard);
- if (NeedGuard) {
- Head = AdjustPoolHeadA ((EFI_PHYSICAL_ADDRESS)(UINTN)Head, NoPages, Size);
- }
+ Head = CoreAllocatePoolPagesI (PoolType, NoPages, Granularity, NeedGuard);
+ if (NeedGuard) {
+ Head = AdjustPoolHeadA ((EFI_PHYSICAL_ADDRESS)(UINTN)Head, NoPages, Size);
+ }
goto Done;
}
@@ -443,8 +443,8 @@ CoreAllocatePoolI (
//
// Get another page
//
- NewPage = CoreAllocatePoolPagesI (PoolType, EFI_SIZE_TO_PAGES (Granularity),
- Granularity, NeedGuard);
+ NewPage = CoreAllocatePoolPagesI (PoolType, EFI_SIZE_TO_PAGES (Granularity),
+ Granularity, NeedGuard);
if (NewPage == NULL) {
goto Done;
}
@@ -490,11 +490,11 @@ Done:
if (Head != NULL) {
- //
- // Account the allocation
- //
- Pool->Used += Size;
-
+ //
+ // Account the allocation
+ //
+ Pool->Used += Size;
+
//
// If we have a pool buffer, fill in the header & tail info
//
@@ -502,24 +502,24 @@ Done:
Head->Size = Size;
Head->Type = (EFI_MEMORY_TYPE) PoolType;
Buffer = Head->Data;
-
- if (HasPoolTail) {
- Tail = HEAD_TO_TAIL (Head);
- Tail->Signature = POOL_TAIL_SIGNATURE;
- Tail->Size = Size;
-
- Size -= POOL_OVERHEAD;
- } else {
- Size -= SIZE_OF_POOL_HEAD;
- }
-
- DEBUG_CLEAR_MEMORY (Buffer, Size);
+
+ if (HasPoolTail) {
+ Tail = HEAD_TO_TAIL (Head);
+ Tail->Signature = POOL_TAIL_SIGNATURE;
+ Tail->Size = Size;
+
+ Size -= POOL_OVERHEAD;
+ } else {
+ Size -= SIZE_OF_POOL_HEAD;
+ }
+
+ DEBUG_CLEAR_MEMORY (Buffer, Size);
DEBUG ((
DEBUG_POOL,
"AllocatePoolI: Type %x, Addr %p (len %lx) %,ld\n", PoolType,
Buffer,
- (UINT64)Size,
+ (UINT64)Size,
(UINT64) Pool->Used
));
@@ -619,34 +619,34 @@ CoreFreePoolPagesI (
(EFI_PHYSICAL_ADDRESS)(UINTN)Memory, EFI_PAGES_TO_SIZE (NoPages));
}
-/**
- Internal function. Frees guarded pool pages.
-
- @param PoolType The type of memory for the pool pages
- @param Memory The base address to free
- @param NoPages The number of pages to free
-
-**/
-STATIC
-VOID
-CoreFreePoolPagesWithGuard (
- IN EFI_MEMORY_TYPE PoolType,
- IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NoPages
- )
-{
- EFI_PHYSICAL_ADDRESS MemoryGuarded;
- UINTN NoPagesGuarded;
-
- MemoryGuarded = Memory;
- NoPagesGuarded = NoPages;
-
- AdjustMemoryF (&Memory, &NoPages);
- CoreFreePoolPagesI (PoolType, Memory, NoPages);
-
- UnsetGuardForMemory (MemoryGuarded, NoPagesGuarded);
-}
-
+/**
+ Internal function. Frees guarded pool pages.
+
+ @param PoolType The type of memory for the pool pages
+ @param Memory The base address to free
+ @param NoPages The number of pages to free
+
+**/
+STATIC
+VOID
+CoreFreePoolPagesWithGuard (
+ IN EFI_MEMORY_TYPE PoolType,
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NoPages
+ )
+{
+ EFI_PHYSICAL_ADDRESS MemoryGuarded;
+ UINTN NoPagesGuarded;
+
+ MemoryGuarded = Memory;
+ NoPagesGuarded = NoPages;
+
+ AdjustMemoryF (&Memory, &NoPages);
+ CoreFreePoolPagesI (PoolType, Memory, NoPages);
+
+ UnsetGuardForMemory (MemoryGuarded, NoPagesGuarded);
+}
+
/**
Internal function to free a pool entry.
Caller must have the memory lock held
@@ -675,8 +675,8 @@ CoreFreePoolI (
UINTN Offset;
BOOLEAN AllFree;
UINTN Granularity;
- BOOLEAN IsGuarded;
- BOOLEAN HasPoolTail;
+ BOOLEAN IsGuarded;
+ BOOLEAN HasPoolTail;
ASSERT(Buffer != NULL);
//
@@ -689,32 +689,32 @@ CoreFreePoolI (
return EFI_INVALID_PARAMETER;
}
- IsGuarded = IsPoolTypeToGuard (Head->Type) &&
- IsMemoryGuarded ((EFI_PHYSICAL_ADDRESS)(UINTN)Head);
- HasPoolTail = !(IsGuarded &&
- ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
-
- if (HasPoolTail) {
- Tail = HEAD_TO_TAIL (Head);
- ASSERT (Tail != NULL);
-
- //
- // Debug
- //
- ASSERT (Tail->Signature == POOL_TAIL_SIGNATURE);
- ASSERT (Head->Size == Tail->Size);
-
- if (Tail->Signature != POOL_TAIL_SIGNATURE) {
- return EFI_INVALID_PARAMETER;
- }
-
- if (Head->Size != Tail->Size) {
- return EFI_INVALID_PARAMETER;
- }
+ IsGuarded = IsPoolTypeToGuard (Head->Type) &&
+ IsMemoryGuarded ((EFI_PHYSICAL_ADDRESS)(UINTN)Head);
+ HasPoolTail = !(IsGuarded &&
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0));
+
+ if (HasPoolTail) {
+ Tail = HEAD_TO_TAIL (Head);
+ ASSERT (Tail != NULL);
+
+ //
+ // Debug
+ //
+ ASSERT (Tail->Signature == POOL_TAIL_SIGNATURE);
+ ASSERT (Head->Size == Tail->Size);
+
+ if (Tail->Signature != POOL_TAIL_SIGNATURE) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (Head->Size != Tail->Size) {
+ return EFI_INVALID_PARAMETER;
+ }
}
- ASSERT_LOCKED (&mPoolMemoryLock);
-
+ ASSERT_LOCKED (&mPoolMemoryLock);
+
//
// Determine the pool type and account for it
//
@@ -749,27 +749,27 @@ CoreFreePoolI (
//
// If it's not on the list, it must be pool pages
//
- if (Index >= SIZE_TO_LIST (Granularity) || IsGuarded) {
+ if (Index >= SIZE_TO_LIST (Granularity) || IsGuarded) {
//
// Return the memory pages back to free memory
//
- NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
+ NoPages = EFI_SIZE_TO_PAGES (Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
- if (IsGuarded) {
- Head = AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)Head);
- CoreFreePoolPagesWithGuard (
- Pool->MemoryType,
- (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
- NoPages
- );
- } else {
- CoreFreePoolPagesI (
- Pool->MemoryType,
- (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
- NoPages
- );
- }
+ if (IsGuarded) {
+ Head = AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)Head);
+ CoreFreePoolPagesWithGuard (
+ Pool->MemoryType,
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
+ NoPages
+ );
+ } else {
+ CoreFreePoolPagesI (
+ Pool->MemoryType,
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Head,
+ NoPages
+ );
+ }
} else {
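Taken together, the Pool.c changes mean a guarded pool allocation is always served from dedicated pages and, in the default BIT7-clear mode, is laid out so its last byte sits right below the tail Guard. A rough caller-level sketch (hypothetical helper, not part of this patch; the 200-byte size is arbitrary, and the end-of-buffer alignment noted below assumes the usual POOL_OVERHEAD/ALIGN_VARIABLE sizing with no extra padding):

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

VOID
GuardedPoolSketch (
  VOID
  )
{
  EFI_STATUS  Status;
  VOID        *Buffer;

  Status = CoreAllocatePool (EfiBootServicesData, 200, &Buffer);
  if (EFI_ERROR (Status)) {
    return;
  }

  if (IsMemoryGuarded ((EFI_PHYSICAL_ADDRESS)(UINTN)Buffer) &&
      ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0)) {
    //
    // No POOL_TAIL in this mode, so the buffer should end on the page
    // boundary just below the tail Guard and a one-byte overrun faults.
    //
    DEBUG ((
      DEBUG_INFO,
      "Guarded pool buffer %p, end-of-buffer page aligned: %d\n",
      Buffer,
      (((UINTN)Buffer + 200) & EFI_PAGE_MASK) == 0
      ));
  }

  CoreFreePool (Buffer);
}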
diff --git a/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c b/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c
index e1e611ab79..21a52d0af5 100644
--- a/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c
+++ b/MdeModulePkg/Core/Dxe/Misc/MemoryProtection.c
@@ -1065,15 +1065,15 @@ CoreInitializeMemoryProtection (
// - code regions should have no EFI_MEMORY_XP attribute
// - EfiConventionalMemory and EfiBootServicesData should use the
// same attribute
- // - heap guard should not be enabled for the same type of memory
+ // - heap guard should not be enabled for the same type of memory
//
ASSERT ((GetPermissionAttributeForMemoryType (EfiBootServicesCode) & EFI_MEMORY_XP) == 0);
ASSERT ((GetPermissionAttributeForMemoryType (EfiRuntimeServicesCode) & EFI_MEMORY_XP) == 0);
ASSERT ((GetPermissionAttributeForMemoryType (EfiLoaderCode) & EFI_MEMORY_XP) == 0);
ASSERT (GetPermissionAttributeForMemoryType (EfiBootServicesData) ==
GetPermissionAttributeForMemoryType (EfiConventionalMemory));
- ASSERT ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & PcdGet64 (PcdHeapGuardPoolType)) == 0);
- ASSERT ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & PcdGet64 (PcdHeapGuardPageType)) == 0);
+ ASSERT ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & PcdGet64 (PcdHeapGuardPoolType)) == 0);
+ ASSERT ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & PcdGet64 (PcdHeapGuardPageType)) == 0);
if (mImageProtectionPolicy != 0 || PcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0) {
Status = CoreCreateEvent (