author | Dun Tan <dun.tan@intel.com> | 2024-05-10 11:48:15 +0800
---|---|---
committer | mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> | 2024-06-04 07:40:27 +0000
commit | fcd09b1edbd377c174e98bf33ace4fc905d4cf29 (patch) |
tree | 9b6b8ca17b60d5d3df8ec9c379438710fab8e22f |
parent | 68310cd56a9cf0824cf4c70e84eb9736b925dc64 (diff) |
UefiCpuPkg: Move some code in DxeMpLib to common place
Move some code from DxeMpLib.c to the common MpLib.c.
The moved code relocates the APs to a new safe buffer
before booting into the OS. In subsequent commits, this
code will also be used by PeiMpLib. This commit doesn't
change any code functionality.
Signed-off-by: Dun Tan <dun.tan@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Jiaxin Wu <jiaxin.wu@intel.com>
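For readers skimming the patch, the sketch below models in plain C the reserved-buffer layout that the moved PrepareApLoopCode sets up and the per-AP stack carve-out that RelocateApLoop relies on: loop code at the low end of the allocation, one AP_SAFE_STACK_SIZE slice per processor hanging below the shared top of stack. It is a standalone illustration with made-up addresses and sizes, not code from this patch.

```c
/*
 * Standalone model of the reserved AP-loop buffer described by this patch
 * (PrepareApLoopCode / RelocateApLoop). Plain C, not EDK2 code; the page
 * macros and the sample values below are stand-ins for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE           4096u
#define AP_SAFE_STACK_SIZE  128u   /* matches the define in DxeMpLib.c */

#define SIZE_TO_PAGES(Size)   (((Size) + PAGE_SIZE - 1) / PAGE_SIZE)
#define PAGES_TO_SIZE(Pages)  ((Pages) * PAGE_SIZE)

int main (void)
{
  uintptr_t  Buffer         = 0x7F000000;  /* pretend reserved allocation below 4GB */
  uint32_t   CpuCount       = 8;           /* hypothetical processor count */
  uint32_t   ApLoopFuncSize = 700;         /* hypothetical size of the relocated loop routine */

  /* Pages for N per-AP stacks plus pages for the loop code itself. */
  uintptr_t  StackPages = SIZE_TO_PAGES (CpuCount * AP_SAFE_STACK_SIZE);
  uintptr_t  FuncPages  = SIZE_TO_PAGES (ApLoopFuncSize);

  /* Loop code sits at the low end; stacks hang below the top of the allocation. */
  uintptr_t  ApLoopCode   = Buffer;
  uintptr_t  TopOfApStack = Buffer + PAGES_TO_SIZE (StackPages + FuncPages);

  for (uint32_t ProcessorNumber = 0; ProcessorNumber < CpuCount; ProcessorNumber++) {
    /* Each AP gets its own AP_SAFE_STACK_SIZE slice below the shared top. */
    uintptr_t  StackTop = TopOfApStack - ProcessorNumber * AP_SAFE_STACK_SIZE;
    printf ("AP %u: stack top 0x%lx\n", (unsigned) ProcessorNumber, (unsigned long) StackTop);
  }

  printf ("loop code at 0x%lx, buffer spans %lu page(s)\n",
          (unsigned long) ApLoopCode,
          (unsigned long) (StackPages + FuncPages));
  return 0;
}
```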
-rw-r--r-- | UefiCpuPkg/Library/MpInitLib/DxeMpLib.c | 151
-rw-r--r-- | UefiCpuPkg/Library/MpInitLib/MpLib.c | 142
-rw-r--r-- | UefiCpuPkg/Library/MpInitLib/MpLib.h | 53
3 files changed, 198 insertions, 148 deletions
diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index 5f0a87c024..f9c5c92c22 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
@@ -20,15 +20,11 @@
#define AP_SAFE_STACK_SIZE 128
-CPU_MP_DATA *mCpuMpData = NULL;
-EFI_EVENT mCheckAllApsEvent = NULL;
-EFI_EVENT mMpInitExitBootServicesEvent = NULL;
-EFI_EVENT mLegacyBootEvent = NULL;
-volatile BOOLEAN mStopCheckAllApsStatus = TRUE;
-RELOCATE_AP_LOOP_ENTRY mReservedApLoop;
-UINTN mReservedTopOfApStack;
-volatile UINT32 mNumberToFinish = 0;
-UINTN mApPageTable;
+CPU_MP_DATA *mCpuMpData = NULL;
+EFI_EVENT mCheckAllApsEvent = NULL;
+EFI_EVENT mMpInitExitBootServicesEvent = NULL;
+EFI_EVENT mLegacyBootEvent = NULL;
+volatile BOOLEAN mStopCheckAllApsStatus = TRUE;
//
// Begin wakeup buffer allocation below 0x88000
@@ -369,60 +365,6 @@ GetProtectedModeCS (
}
/**
- Do sync on APs.
-
- @param[in, out] Buffer Pointer to private data buffer.
-**/
-VOID
-EFIAPI
-RelocateApLoop (
- IN OUT VOID *Buffer
- )
-{
- CPU_MP_DATA *CpuMpData;
- BOOLEAN MwaitSupport;
- UINTN ProcessorNumber;
- UINTN StackStart;
-
- MpInitLibWhoAmI (&ProcessorNumber);
- CpuMpData = GetCpuMpData ();
- MwaitSupport = IsMwaitSupport ();
- if (CpuMpData->UseSevEsAPMethod) {
- //
- // 64-bit AMD processors with SEV-ES
- //
- StackStart = CpuMpData->SevEsAPResetStackStart;
- mReservedApLoop.AmdSevEntry (
- MwaitSupport,
- CpuMpData->ApTargetCState,
- CpuMpData->PmCodeSegment,
- StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
- (UINTN)&mNumberToFinish,
- CpuMpData->Pm16CodeSegment,
- CpuMpData->SevEsAPBuffer,
- CpuMpData->WakeupBuffer
- );
- } else {
- //
- // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
- //
- StackStart = mReservedTopOfApStack;
- mReservedApLoop.GenericEntry (
- MwaitSupport,
- CpuMpData->ApTargetCState,
- StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
- (UINTN)&mNumberToFinish,
- mApPageTable
- );
- }
-
- //
- // It should never reach here
- //
- ASSERT (FALSE);
-}
-
-/**
Allocate buffer for ApLoopCode.
@param[in] Pages Number of pages to allocate.
@@ -478,89 +420,6 @@ RemoveNxprotection (
}
/**
- Prepare ApLoopCode.
-
- @param[in] CpuMpData Pointer to CpuMpData.
-**/
-VOID
-PrepareApLoopCode (
- IN CPU_MP_DATA *CpuMpData
- )
-{
- EFI_PHYSICAL_ADDRESS Address;
- MP_ASSEMBLY_ADDRESS_MAP *AddressMap;
- UINT8 *ApLoopFunc;
- UINTN ApLoopFuncSize;
- UINTN StackPages;
- UINTN FuncPages;
- IA32_CR0 Cr0;
-
- AddressMap = &CpuMpData->AddressMap;
- if (CpuMpData->UseSevEsAPMethod) {
- //
- // 64-bit AMD processors with SEV-ES
- //
- Address = BASE_4GB - 1;
- ApLoopFunc = AddressMap->RelocateApLoopFuncAddressAmdSev;
- ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
- } else {
- //
- // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
- //
- Address = MAX_ADDRESS;
- ApLoopFunc = AddressMap->RelocateApLoopFuncAddressGeneric;
- ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
- }
-
- //
- // Avoid APs access invalid buffer data which allocated by BootServices,
- // so we will allocate reserved data for AP loop code. We also need to
- // allocate this buffer below 4GB due to APs may be transferred to 32bit
- // protected mode on long mode DXE.
- // Allocating it in advance since memory services are not available in
- // Exit Boot Services callback function.
- //
- // +------------+ (TopOfApStack)
- // | Stack * N |
- // +------------+ (stack base, 4k aligned)
- // | Padding |
- // +------------+
- // | Ap Loop |
- // +------------+ ((low address, 4k-aligned)
- //
-
- StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
- FuncPages = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
-
- AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
- ASSERT (Address != 0);
-
- Cr0.UintN = AsmReadCr0 ();
- if (Cr0.Bits.PG != 0) {
- //
- // Make sure that the buffer memory is executable if NX protection is enabled
- // for EfiReservedMemoryType.
- //
- RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
- }
-
- mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages+FuncPages);
- ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
- mReservedApLoop.Data = (VOID *)(UINTN)Address;
- ASSERT (mReservedApLoop.Data != NULL);
- CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
- if (!CpuMpData->UseSevEsAPMethod) {
- //
- // processors without SEV-ES and paging is enabled
- //
- mApPageTable = CreatePageTable (
- (UINTN)Address,
- EFI_PAGES_TO_SIZE (StackPages+FuncPages)
- );
- }
-}
-
-/**
Callback function for ExitBootServices.
@param[in] Event Event whose notification function is being invoked.
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.c b/UefiCpuPkg/Library/MpInitLib/MpLib.c
index 4bf3dc5fca..f97298887f 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.c
@@ -17,6 +17,11 @@
EFI_GUID mCpuInitMpLibHobGuid = CPU_INIT_MP_LIB_HOB_GUID;
EFI_GUID mMpHandOffGuid = MP_HANDOFF_GUID;
EFI_GUID mMpHandOffConfigGuid = MP_HANDOFF_CONFIG_GUID;
+RELOCATE_AP_LOOP_ENTRY mReservedApLoop;
+UINTN mReservedTopOfApStack;
+volatile UINT32 mNumberToFinish = 0;
+UINTN mApPageTable;
+
/**
Save the volatile registers required to be restored following INIT IPI.
@@ -3240,3 +3245,140 @@ ConfidentialComputingGuestHas (
return (CurrentAttr == Attr);
}
+
+/**
+ Do sync on APs.
+
+ @param[in, out] Buffer Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+ IN OUT VOID *Buffer
+ )
+{
+ CPU_MP_DATA *CpuMpData;
+ BOOLEAN MwaitSupport;
+ UINTN ProcessorNumber;
+ UINTN StackStart;
+
+ MpInitLibWhoAmI (&ProcessorNumber);
+ CpuMpData = GetCpuMpData ();
+ MwaitSupport = IsMwaitSupport ();
+ if (CpuMpData->UseSevEsAPMethod) {
+ //
+ // 64-bit AMD processors with SEV-ES
+ //
+ StackStart = CpuMpData->SevEsAPResetStackStart;
+ mReservedApLoop.AmdSevEntry (
+ MwaitSupport,
+ CpuMpData->ApTargetCState,
+ CpuMpData->PmCodeSegment,
+ StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+ (UINTN)&mNumberToFinish,
+ CpuMpData->Pm16CodeSegment,
+ CpuMpData->SevEsAPBuffer,
+ CpuMpData->WakeupBuffer
+ );
+ } else {
+ //
+ // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+ //
+ StackStart = mReservedTopOfApStack;
+ mReservedApLoop.GenericEntry (
+ MwaitSupport,
+ CpuMpData->ApTargetCState,
+ StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+ (UINTN)&mNumberToFinish,
+ mApPageTable
+ );
+ }
+
+ //
+ // It should never reach here
+ //
+ ASSERT (FALSE);
+}
+
+/**
+ Prepare ApLoopCode.
+
+ @param[in] CpuMpData Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+ IN CPU_MP_DATA *CpuMpData
+ )
+{
+ EFI_PHYSICAL_ADDRESS Address;
+ MP_ASSEMBLY_ADDRESS_MAP *AddressMap;
+ UINT8 *ApLoopFunc;
+ UINTN ApLoopFuncSize;
+ UINTN StackPages;
+ UINTN FuncPages;
+ IA32_CR0 Cr0;
+
+ AddressMap = &CpuMpData->AddressMap;
+ if (CpuMpData->UseSevEsAPMethod) {
+ //
+ // 64-bit AMD processors with SEV-ES
+ //
+ Address = BASE_4GB - 1;
+ ApLoopFunc = AddressMap->RelocateApLoopFuncAddressAmdSev;
+ ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeAmdSev;
+ } else {
+ //
+ // Intel processors (32-bit or 64-bit), 32-bit AMD processors, or 64-bit AMD processors without SEV-ES
+ //
+ Address = MAX_ADDRESS;
+ ApLoopFunc = AddressMap->RelocateApLoopFuncAddressGeneric;
+ ApLoopFuncSize = AddressMap->RelocateApLoopFuncSizeGeneric;
+ }
+
+ //
+ // Avoid APs access invalid buffer data which allocated by BootServices,
+ // so we will allocate reserved data for AP loop code. We also need to
+ // allocate this buffer below 4GB due to APs may be transferred to 32bit
+ // protected mode on long mode DXE.
+ // Allocating it in advance since memory services are not available in
+ // Exit Boot Services callback function.
+ //
+ // +------------+ (TopOfApStack)
+ // | Stack * N |
+ // +------------+ (stack base, 4k aligned)
+ // | Padding |
+ // +------------+
+ // | Ap Loop |
+ // +------------+ ((low address, 4k-aligned)
+ //
+
+ StackPages = EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * AP_SAFE_STACK_SIZE);
+ FuncPages = EFI_SIZE_TO_PAGES (ApLoopFuncSize);
+
+ AllocateApLoopCodeBuffer (StackPages + FuncPages, &Address);
+ ASSERT (Address != 0);
+
+ Cr0.UintN = AsmReadCr0 ();
+ if (Cr0.Bits.PG != 0) {
+ //
+ // Make sure that the buffer memory is executable if NX protection is enabled
+ // for EfiReservedMemoryType.
+ //
+ RemoveNxprotection (Address, EFI_PAGES_TO_SIZE (FuncPages));
+ }
+
+ mReservedTopOfApStack = (UINTN)Address + EFI_PAGES_TO_SIZE (StackPages+FuncPages);
+ ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
+ mReservedApLoop.Data = (VOID *)(UINTN)Address;
+ ASSERT (mReservedApLoop.Data != NULL);
+ CopyMem (mReservedApLoop.Data, ApLoopFunc, ApLoopFuncSize);
+ if (!CpuMpData->UseSevEsAPMethod) {
+ //
+ // processors without SEV-ES and paging is enabled
+ //
+ mApPageTable = CreatePageTable (
+ (UINTN)Address,
+ EFI_PAGES_TO_SIZE (StackPages+FuncPages)
+ );
+ }
+}
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index 179f8e585b..a4a33bf538 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -1,7 +1,7 @@
/** @file
Common header file for MP Initialize Library.
- Copyright (c) 2016 - 2023, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2016 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2020 - 2024, AMD Inc. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -357,7 +357,8 @@ typedef
IN UINTN StackStart
);
-extern EFI_GUID mCpuInitMpLibHobGuid;
+extern EFI_GUID mCpuInitMpLibHobGuid;
+extern volatile UINT32 mNumberToFinish;
/**
Assembly code to place AP into safe loop mode.
@@ -933,4 +934,52 @@ AmdSevUpdateCpuMpData (
IN CPU_MP_DATA *CpuMpData
);
+/**
+ Prepare ApLoopCode.
+
+ @param[in] CpuMpData Pointer to CpuMpData.
+**/
+VOID
+PrepareApLoopCode (
+ IN CPU_MP_DATA *CpuMpData
+ );
+
+/**
+ Do sync on APs.
+
+ @param[in, out] Buffer Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+RelocateApLoop (
+ IN OUT VOID *Buffer
+ );
+
+/**
+ Allocate buffer for ApLoopCode.
+
+ @param[in] Pages Number of pages to allocate.
+ @param[in, out] Address Pointer to the allocated buffer.
+**/
+VOID
+AllocateApLoopCodeBuffer (
+ IN UINTN Pages,
+ IN OUT EFI_PHYSICAL_ADDRESS *Address
+ );
+
+/**
+ Remove Nx protection for the range specific by BaseAddress and Length.
+
+ The PEI implementation uses CpuPageTableLib to change the attribute.
+ The DXE implementation uses gDS to change the attribute.
+
+ @param[in] BaseAddress BaseAddress of the range.
+ @param[in] Length Length of the range.
+**/
+VOID
+RemoveNxprotection (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN Length
+ );
+
#endif
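The declarations added to MpLib.h above form the seam of the refactor: the relocated common code in MpLib.c calls AllocateApLoopCodeBuffer and RemoveNxprotection, while the PEI and DXE library instances each link in their own definitions (per the doc comments, PEI edits the page table via CpuPageTableLib, DXE goes through gDS). A minimal single-file C model of that pattern is sketched below; the names, the compile flag, and the stub bodies are hypothetical and only illustrate the idea.

```c
/*
 * Single-file model of the MpInitLib pattern: one common routine, one shared
 * declaration, and a per-build implementation of the hook. In the real tree
 * the two implementations live in the PEI and DXE MpInitLib instances; here a
 * compile flag (-DBUILD_PEI) stands in for choosing the library instance.
 * Nothing below is EDK2 code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Shared declaration (analogous to RemoveNxprotection in MpLib.h). */
static void RemoveNxHook (uintptr_t Base, size_t Length);

/* Common code (analogous to the moved PrepareApLoopCode in MpLib.c). */
static void PrepareBuffer (uintptr_t Base, size_t Length)
{
  /* ...the real library allocates the buffer and copies the loop code here... */
  RemoveNxHook (Base, Length);
}

#ifdef BUILD_PEI
/* Stand-in for the PEI implementation (real code uses CpuPageTableLib). */
static void RemoveNxHook (uintptr_t Base, size_t Length)
{
  printf ("PEI-style: edit page table for [0x%lx, +%zu)\n", (unsigned long) Base, Length);
}
#else
/* Stand-in for the DXE implementation (real code uses the DXE GCD services). */
static void RemoveNxHook (uintptr_t Base, size_t Length)
{
  printf ("DXE-style: clear XP attribute for [0x%lx, +%zu)\n", (unsigned long) Base, Length);
}
#endif

int main (void)
{
  PrepareBuffer (0x80000, 4096);
  return 0;
}
```

Compiling with or without -DBUILD_PEI picks the implementation, mirroring how a platform build selects the PEI or DXE MpInitLib instance while the common MpLib.c code stays identical.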