author    Jian J Wang <jian.j.wang@intel.com>    2018-01-17 18:36:55 +0800
committer Star Zeng <star.zeng@intel.com>        2018-01-19 14:16:11 +0800
commit    425d25699be83c35e12df8470b827d7fbcef3bce (patch)
tree      8c148e4dd10131ca0d9c70c2a4ea0ab1d41595c1
parent    d40695ad82a6a9990d4e73bc9cc3de123eda7a90 (diff)
MdeModulePkg/Core: fix a logic hole in page free
This hole causes random page faults. The root cause is that a Guard page which has just been freed back to the page pool, but whose not-present attribute has not yet been cleared, can be allocated right away by the internal function CoreFreeMemoryMapStack().

The solution is to clear the not-present attribute of a freed Guard page before doing any free operation, instead of after it. The reason this was not done before is that manipulating page attributes might trigger a memory allocation, which would dead-lock inside a memory allocation/free operation; Guard pages were therefore always set or unset outside the memory lock. After a thorough analysis, we believe clearing a Guard page will not cause a memory allocation, because the memory being manipulated has certainly been manipulated before, so no memory allocation should occur in this situation.

Since the Guard page's not-present attribute is now cleared before freeing instead of after, the debug code that clears freed memory can be restored to its original form (no checking for, and skipping of, Guard pages).

Cc: Ruiyu Ni <ruiyu.ni@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Jian J Wang <jian.j.wang@intel.com>
Reviewed-by: Star Zeng <star.zeng@intel.com>
Reviewed-by: Ruiyu Ni <ruiyu.ni@intel.com>
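To make the ordering issue concrete, below is a minimal, self-contained C model of the race the commit message describes. It is not EDK II code: page_present[], page_free[], alloc_page() and the two free_guard_page_* functions are hypothetical stand-ins for the not-present Guard attribute, the free page pool, the allocation done by CoreFreeMemoryMapStack(), and the old versus new free orderings.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_PAGES 8

static bool page_present[NUM_PAGES];  /* models the Guard not-present attribute  */
static bool page_free[NUM_PAGES];     /* models membership in the free page pool */

/* Models CoreFreeMemoryMapStack() grabbing a free page right away. */
static int alloc_page (void)
{
  for (int i = 0; i < NUM_PAGES; i++) {
    if (page_free[i]) {
      page_free[i] = false;
      /* Touching a not-present page would page-fault in the real core. */
      assert (page_present[i] && "page fault: allocated a not-present Guard page");
      return i;
    }
  }
  return -1;
}

/* Old ordering: free first, clear the not-present attribute afterwards. */
static void free_guard_page_old (int i)
{
  page_free[i] = true;      /* page goes back to the pool ...            */
  (void) alloc_page ();     /* ... and may be handed out immediately ... */
  page_present[i] = true;   /* ... before its Guard attribute is cleared */
}

/* New ordering: the Guard attribute is cleared before the page is freed. */
static void free_guard_page_new (int i)
{
  page_present[i] = true;   /* clear not-present first (UnsetGuardForMemory) */
  page_free[i] = true;      /* only then hand the page back to the pool      */
  (void) alloc_page ();
}

int main (void)
{
  page_present[0] = false;      /* page 0 is a Guard page: mapped not-present */
  free_guard_page_new (0);      /* passes: the page is present when reused    */
  /* free_guard_page_old (0) would trip the assert, modeling the random fault */
  printf ("new ordering: no fault\n");
  return 0;
}
```

The fix moves the attribute change inside the memory lock, which the commit argues is safe because updating attributes on memory that has already been manipulated does not itself allocate.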
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/HeapGuard.c   15
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Page.c        40
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Pool.c        10
3 files changed, 29 insertions(+), 36 deletions(-)
diff --git a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
index 0f035043e1..92753c7269 100644
--- a/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
+++ b/MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
@@ -1127,11 +1127,26 @@ CoreConvertPagesWithGuard (
   IN EFI_MEMORY_TYPE  NewType
   )
 {
+  UINT64  OldStart;
+  UINTN   OldPages;
+
   if (NewType == EfiConventionalMemory) {
+    OldStart = Start;
+    OldPages = NumberOfPages;
+
     AdjustMemoryF (&Start, &NumberOfPages);
     if (NumberOfPages == 0) {
       return EFI_SUCCESS;
     }
+
+    //
+    // It's safe to unset Guard page inside memory lock because there should
+    // be no memory allocation occurred in updating memory page attribute at
+    // this point. And unsetting Guard page before free will prevent Guard
+    // page just freed back to pool from being allocated right away before
+    // marking it usable (from non-present to present).
+    //
+    UnsetGuardForMemory (OldStart, OldPages);
   } else {
     AdjustMemoryA (&Start, &NumberOfPages);
   }
diff --git a/MdeModulePkg/Core/Dxe/Mem/Page.c b/MdeModulePkg/Core/Dxe/Mem/Page.c
index db32d0f940..8d5d03a6d9 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Page.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Page.c
@@ -900,42 +900,17 @@ CoreConvertPagesEx (
     //
     CoreAddRange (MemType, Start, RangeEnd, Attribute);
     if (ChangingType && (MemType == EfiConventionalMemory)) {
+      //
+      // Avoid calling DEBUG_CLEAR_MEMORY() for an address of 0 because this
+      // macro will ASSERT() if address is 0. Instead, CoreAddRange() guarantees
+      // that the page starting at address 0 is always filled with zeros.
+      //
       if (Start == 0) {
-        //
-        // Avoid calling DEBUG_CLEAR_MEMORY() for an address of 0 because this
-        // macro will ASSERT() if address is 0. Instead, CoreAddRange()
-        // guarantees that the page starting at address 0 is always filled
-        // with zeros.
-        //
         if (RangeEnd > EFI_PAGE_SIZE) {
           DEBUG_CLEAR_MEMORY ((VOID *)(UINTN) EFI_PAGE_SIZE, (UINTN) (RangeEnd - EFI_PAGE_SIZE + 1));
         }
       } else {
-        //
-        // If Heap Guard is enabled, the page at the top and/or bottom of
-        // this memory block to free might be inaccessible. Skipping them
-        // to avoid page fault exception.
-        //
-        UINT64      StartToClear;
-        UINT64      EndToClear;
-
-        StartToClear = Start;
-        EndToClear   = RangeEnd + 1;
-        if (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT1|BIT0)) {
-          if (IsGuardPage (StartToClear)) {
-            StartToClear += EFI_PAGE_SIZE;
-          }
-          if (IsGuardPage (EndToClear - 1)) {
-            EndToClear -= EFI_PAGE_SIZE;
-          }
-        }
-
-        if (EndToClear > StartToClear) {
-          DEBUG_CLEAR_MEMORY(
-            (VOID *)(UINTN)StartToClear,
-            (UINTN)(EndToClear - StartToClear)
-            );
-        }
+        DEBUG_CLEAR_MEMORY ((VOID *)(UINTN) Start, (UINTN) (RangeEnd - Start + 1));
       }
     }
@@ -1513,9 +1488,6 @@ CoreInternalFreePages (
 Done:
   CoreReleaseMemoryLock ();
-  if (IsGuarded) {
-    UnsetGuardForMemory(Memory, NumberOfPages);
-  }
   return Status;
 }
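For the Page.c side, the sketch below (again a model, not EDK II code) shows why the StartToClear/EndToClear guard-skipping can be dropped: once the Guard pages' not-present attribute has been cleared before the range is converted, the debug clear can cover the whole freed range unconditionally. clear_range(), present[] and PAGE_SIZE are hypothetical; the 0xAF fill is meant to mirror the debug pattern DEBUG_CLEAR_MEMORY() writes, but treat that detail as an assumption.

```c
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE  4096
#define NUM_PAGES  4

static bool          present[NUM_PAGES];          /* per-page present bit */
static unsigned char ram[NUM_PAGES * PAGE_SIZE];  /* the pages themselves */

/* Models DEBUG_CLEAR_MEMORY(): touching a not-present page would fault. */
static void clear_range (size_t start, size_t length)
{
  for (size_t off = start; off < start + length; off += PAGE_SIZE) {
    assert (present[off / PAGE_SIZE] && "page fault: cleared a not-present page");
  }
  memset (&ram[start], 0xAF, length);   /* fill freed memory with a debug pattern */
}

int main (void)
{
  /* Page 0 is a head Guard page; pages 1..3 are the freed data pages.      */
  present[0] = false;
  present[1] = present[2] = present[3] = true;

  /* Old ordering: the Guard page was still not-present at this point, so   */
  /* the clear had to skip it -- the removed StartToClear/EndToClear logic. */
  clear_range (PAGE_SIZE, (NUM_PAGES - 1) * PAGE_SIZE);

  /* New ordering: the Guard attribute has already been cleared, every page */
  /* in the range is present, and the whole range can be cleared at once.   */
  present[0] = true;
  clear_range (0, NUM_PAGES * PAGE_SIZE);
  return 0;
}
```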
diff --git a/MdeModulePkg/Core/Dxe/Mem/Pool.c b/MdeModulePkg/Core/Dxe/Mem/Pool.c
index 7464d8773a..df9a1d28df 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Pool.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Pool.c
@@ -643,10 +643,16 @@ CoreFreePoolPagesWithGuard (
   AdjustMemoryF (&Memory, &NoPages);
   if (NoPages > 0) {
+    //
+    // It's safe to unset Guard page inside memory lock because there should
+    // be no memory allocation occurred in updating memory page attribute at
+    // this point. And unsetting Guard page before free will prevent Guard
+    // page just freed back to pool from being allocated right away before
+    // marking it usable (from non-present to present).
+    //
+    UnsetGuardForMemory (MemoryGuarded, NoPagesGuarded);
     CoreFreePoolPagesI (PoolType, Memory, NoPages);
   }
-
-  UnsetGuardForMemory (MemoryGuarded, NoPagesGuarded);
 }
/**