author     Eric Dong <eric.dong@intel.com>    2018-10-15 10:34:59 +0800
committer  Eric Dong <eric.dong@intel.com>    2018-10-22 11:19:48 +0800
commit     93324390582d6a5beaab226c74466e3b2497edca (patch)
tree       9246d3619f42125e89968e936b45011c3acd2858 /UefiCpuPkg/PiSmmCpuDxeSmm
parent     b3c71b472dff2c02f0cc38d7a1959cfb2ba8420d (diff)
UefiCpuPkg/PiSmmCpuDxeSmm: Add logic to support semaphore type.
V4 changes:
1. Serialize the console log output from different threads when programming the register table.
2. Check AcpiCpuData before using it to avoid a potential ASSERT.
V3 changes:
1. Use a global variable instead of an internal function to return the string for the register
type and dependency type.
2. Add comments for the more complicated logic.
V1 changes:
Because this driver needs to set MSRs that were saved during the normal boot phase, sync the
semaphore logic from the RegisterCpuFeaturesLib code used in the normal boot phase.
For details, see the corresponding RegisterCpuFeaturesLib change:
UefiCpuPkg/RegisterCpuFeaturesLib: Add logic to support semaphore type.
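
To illustrate the synced pattern (illustrative only, not part of this patch): within a dependency
scope, every thread first releases the semaphore of every thread in the scope, then waits on its
own semaphore once per thread in the scope, so no thread proceeds until all of them have
signalled. A minimal sketch built on SynchronizationLib's InterlockedIncrement and
InterlockedCompareExchange32; the function and parameter names here are hypothetical:

  #include <Base.h>
  #include <Library/SynchronizationLib.h>

  //
  // Illustrative barrier between NumberOfThreads threads. SemaphoreBlock holds
  // one UINT32 counter per thread; MyIndex is the caller's own slot.
  //
  VOID
  BarrierSketch (
    IN OUT volatile UINT32  *SemaphoreBlock,
    IN     UINT32           NumberOfThreads,
    IN     UINT32           MyIndex
    )
  {
    UINT32  Index;
    UINT32  Value;

    //
    // V(0..n-1): signal every thread in the scope that this thread is ready.
    //
    for (Index = 0; Index < NumberOfThreads; Index++) {
      InterlockedIncrement (&SemaphoreBlock[Index]);
    }

    //
    // n * P(MyIndex): consume one signal per peer from this thread's own
    // counter; this returns only after every peer has posted.
    //
    for (Index = 0; Index < NumberOfThreads; Index++) {
      do {
        Value = SemaphoreBlock[MyIndex];
      } while ((Value == 0) ||
               (InterlockedCompareExchange32 (
                  &SemaphoreBlock[MyIndex],
                  Value,
                  Value - 1
                  ) != Value));
    }
  }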
Cc: Ruiyu Ni <ruiyu.ni@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Eric Dong <eric.dong@intel.com>
Reviewed-by: Ruiyu Ni <ruiyu.ni@intel.com>
Acked-by: Laszlo Ersek <lersek@redhat.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm')
-rw-r--r-- | UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c | 413
-rw-r--r-- | UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 3
-rw-r--r-- | UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h | 3
3 files changed, 276 insertions, 143 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
index 52ff9679d5..fec53c522f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
@@ -38,9 +38,13 @@ typedef struct {
 } MP_ASSEMBLY_ADDRESS_MAP;
//
-// Spin lock used to serialize MemoryMapped operation
+// Flags used when programming the registers.
//
-SPIN_LOCK *mMemoryMappedLock = NULL;
+typedef struct {
+ volatile UINTN ConsoleLogLock; // Spinlock used to serialize console output.
+ volatile UINTN MemoryMappedLock; // Spinlock used to program MMIO registers.
+ volatile UINT32 *SemaphoreCount; // Semaphore counters used to program semaphore-type entries.
+} PROGRAM_CPU_REGISTER_FLAGS;
//
// Signal that SMM BASE relocation is complete.
@@ -62,13 +66,11 @@ AsmGetAddressMap (
 #define LEGACY_REGION_SIZE (2 * 0x1000)
#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
+PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;
ACPI_CPU_DATA mAcpiCpuData;
volatile UINT32 mNumberToFinish;
MP_CPU_EXCHANGE_INFO *mExchangeInfo;
BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
-MP_MSR_LOCK *mMsrSpinLocks = NULL;
-UINTN mMsrSpinLockCount;
-UINTN mMsrCount = 0;
//
// S3 boot flag
@@ -91,88 +93,7 @@ UINT8 mApHltLoopCodeTemplate[] = {
  0xEB, 0xFC // jmp $-2
};
-/**
- Get MSR spin lock by MSR index.
-
- @param MsrIndex MSR index value.
-
- @return Pointer to MSR spin lock.
-
-**/
-SPIN_LOCK *
-GetMsrSpinLockByIndex (
- IN UINT32 MsrIndex
- )
-{
- UINTN Index;
- for (Index = 0; Index < mMsrCount; Index++) {
- if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
- return mMsrSpinLocks[Index].SpinLock;
- }
- }
- return NULL;
-}
-
-/**
- Initialize MSR spin lock by MSR index.
-
- @param MsrIndex MSR index value.
-
-**/
-VOID
-InitMsrSpinLockByIndex (
- IN UINT32 MsrIndex
- )
-{
- UINTN MsrSpinLockCount;
- UINTN NewMsrSpinLockCount;
- UINTN Index;
- UINTN AddedSize;
-
- if (mMsrSpinLocks == NULL) {
- MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
- mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
- ASSERT (mMsrSpinLocks != NULL);
- for (Index = 0; Index < MsrSpinLockCount; Index++) {
- mMsrSpinLocks[Index].SpinLock =
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
- mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
- }
- mMsrSpinLockCount = MsrSpinLockCount;
- mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
- }
- if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
- //
- // Initialize spin lock for MSR programming
- //
- mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
- InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
- mMsrCount ++;
- if (mMsrCount == mMsrSpinLockCount) {
- //
- // If MSR spin lock buffer is full, enlarge it
- //
- AddedSize = SIZE_4KB;
- mSmmCpuSemaphores.SemaphoreMsr.Msr =
- AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
- ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
- NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
- mMsrSpinLocks = ReallocatePool (
- sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
- sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
- mMsrSpinLocks
- );
- ASSERT (mMsrSpinLocks != NULL);
- mMsrSpinLockCount = NewMsrSpinLockCount;
- for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
- mMsrSpinLocks[Index].SpinLock =
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
- (Index - mMsrCount) * mSemaphoreSize);
- mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
- }
- }
- }
-}
+CHAR16 *mRegisterTypeStr[] = {L"MSR", L"CR", L"MMIO", L"CACHE", L"SEMAP", L"INVALID" };
/**
Sync up the MTRR values for all processors.
@@ -204,42 +125,103 @@ Returns:
 }
/**
- Programs registers for the calling processor.
+ Increment semaphore by 1.
- This function programs registers for the calling processor.
+ @param Sem IN: 32-bit unsigned integer
- @param RegisterTables Pointer to register table of the running processor.
- @param RegisterTableCount Register table count.
+**/
+VOID
+S3ReleaseSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ InterlockedIncrement (Sem);
+}
+
+/**
+ Decrement the semaphore by 1 if it is not zero.
+ Performs an atomic decrement operation for semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param Sem IN: 32-bit unsigned integer
+
+**/
+VOID
+S3WaitForSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (Value == 0 ||
+ InterlockedCompareExchange32 (
+ Sem,
+ Value,
+ Value - 1
+ ) != Value);
+}
+
+/**
+ Initialize the CPU registers from a register table.
+
+ @param[in] RegisterTable The register table for this AP.
+ @param[in] ApLocation AP location info for this AP.
+ @param[in] CpuStatus CPU status info for this CPU.
+ @param[in] CpuFlags Flags data structure used when programming the registers.
+
+ @note This service could be called by BSP/APs.
**/
VOID
-SetProcessorRegister (
- IN CPU_REGISTER_TABLE *RegisterTables,
- IN UINTN RegisterTableCount
+ProgramProcessorRegister (
+ IN CPU_REGISTER_TABLE *RegisterTable,
+ IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,
+ IN CPU_STATUS_INFORMATION *CpuStatus,
+ IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags
)
{
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
UINTN Index;
UINTN Value;
- SPIN_LOCK *MsrSpinLock;
- UINT32 InitApicId;
- CPU_REGISTER_TABLE *RegisterTable;
-
- InitApicId = GetInitialApicId ();
- RegisterTable = NULL;
- for (Index = 0; Index < RegisterTableCount; Index++) {
- if (RegisterTables[Index].InitialApicId == InitApicId) {
- RegisterTable = &RegisterTables[Index];
- break;
- }
- }
- ASSERT (RegisterTable != NULL);
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;
+ volatile UINT32 *SemaphorePtr;
+ UINT32 FirstThread;
+ UINT32 PackageThreadsCount;
+ UINT32 CurrentThread;
+ UINTN ProcessorIndex;
+ UINTN ThreadIndex;
+ UINTN ValidThreadCount;
+ UINT32 *ValidCoreCountPerPackage;
//
// Traverse Register Table of this logical processor
//
- RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
- for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
+ RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
+
+ for (Index = 0; Index < RegisterTable->TableLength; Index++) {
+
+ RegisterTableEntry = &RegisterTableEntryHead[Index];
+
+ DEBUG_CODE_BEGIN ();
+ if (ApLocation != NULL) {
+ AcquireSpinLock (&CpuFlags->ConsoleLogLock);
+ ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
+ ApLocation->Core * CpuStatus->MaxThreadCount +
+ ApLocation->Thread;
+ DEBUG ((
+ DEBUG_INFO,
+ "Processor = %lu, Entry Index %lu, Type = %s!\n",
+ (UINT64)ThreadIndex,
+ (UINT64)Index,
+ mRegisterTypeStr[MIN ((REGISTER_TYPE)RegisterTableEntry->RegisterType, InvalidReg)]
+ ));
+ ReleaseSpinLock (&CpuFlags->ConsoleLogLock);
+ }
+ DEBUG_CODE_END ();
+
//
// Check the type of specified register
//
@@ -311,12 +293,6 @@ SetProcessorRegister (
        );
} else {
//
- // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
- // to make sure MSR read/write operation is atomic.
- //
- MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
- AcquireSpinLock (MsrSpinLock);
- //
// Set the bit section according to bit start and length
//
AsmMsrBitFieldWrite64 (
@@ -325,21 +301,20 @@ SetProcessorRegister (
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
RegisterTableEntry->Value
);
- ReleaseSpinLock (MsrSpinLock);
}
break;
//
// MemoryMapped operations
//
case MemoryMapped:
- AcquireSpinLock (mMemoryMappedLock);
+ AcquireSpinLock (&CpuFlags->MemoryMappedLock);
MmioBitFieldWrite32 (
(UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
RegisterTableEntry->ValidBitStart,
RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
(UINT32)RegisterTableEntry->Value
);
- ReleaseSpinLock (mMemoryMappedLock);
+ ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
break;
//
// Enable or disable cache
@@ -355,6 +330,95 @@ SetProcessorRegister (
      }
break;
+ case Semaphore:
+ // The semaphore logic works as below:
+ //
+ // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
+ // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
+ //
+ // All threads (T0...Tn) wait at the P() line and then continue running
+ // together.
+ //
+ //
+ // T0 T1 ... Tn
+ //
+ // V(0...n) V(0...n) ... V(0...n)
+ // n * P(0) n * P(1) ... n * P(n)
+ //
+ ASSERT (
+ (ApLocation != NULL) &&
+ (CpuStatus->ValidCoreCountPerPackage != 0) &&
+ (CpuFlags->SemaphoreCount) != NULL
+ );
+ SemaphorePtr = CpuFlags->SemaphoreCount;
+ switch (RegisterTableEntry->Value) {
+ case CoreDepType:
+ //
+ // Get the offset of the first thread in the core that the current thread belongs to.
+ //
+ FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
+ CurrentThread = FirstThread + ApLocation->Thread;
+ //
+ // First, notify all threads in the current core that this thread is ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
+ }
+ //
+ // Second, wait until all valid threads in the current core are ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
+ }
+ break;
+
+ case PackageDepType:
+ ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;
+ //
+ // Get the offset of the first thread in the package that the current thread belongs to.
+ //
+ FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
+ //
+ // Get the maximum possible thread count for the current package.
+ //
+ PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
+ CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
+ //
+ // Get the valid thread count for current package.
+ //
+ ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package];
+
+ //
+ // Different packages may contain different numbers of valid cores. If the driver tracked
+ // the exact core count of each package, the logic would become much more complicated, so
+ // it simply records the maximum core count across all packages and uses that as the
+ // expected core count for every package.
+ // In the two steps below, the current thread first releases the semaphore of every thread
+ // slot in the current package; some of those slots may not be valid, but that is harmless.
+ // Second, the current thread waits on its own semaphore once per valid thread in the
+ // package. Because only the valid threads release this thread's semaphore, it only needs
+ // to wait the valid thread count times. For example, with MaxCoreCount = 4,
+ // MaxThreadCount = 2 and 2 valid cores in this package, each thread releases 8 semaphores
+ // but waits only 4 times.
+ //
+
+ //
+ // First, notify all threads in the current package that this thread is ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
+ }
+ //
+ // Second, wait until all valid threads in the current package are ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
+ }
+ break;
+
+ default:
+ break;
+ }
+ break;
+
default:
break;
}
@@ -362,6 +426,58 @@ SetProcessorRegister (
  }
/**
+
+ Set processor registers for one AP.
+
+ @param PreSmmRegisterTable TRUE to use the pre-SMM-init register table;
+                            FALSE to use the normal register table.
+
+**/
+VOID
+SetRegister (
+ IN BOOLEAN PreSmmRegisterTable
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTable;
+ CPU_REGISTER_TABLE *RegisterTables;
+ UINT32 InitApicId;
+ UINTN ProcIndex;
+ UINTN Index;
+
+ if (PreSmmRegisterTable) {
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
+ } else {
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
+ }
+
+ InitApicId = GetInitialApicId ();
+ RegisterTable = NULL;
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTables[Index].InitialApicId == InitApicId) {
+ RegisterTable = &RegisterTables[Index];
+ ProcIndex = Index;
+ break;
+ }
+ }
+ ASSERT (RegisterTable != NULL);
+
+ if (mAcpiCpuData.ApLocation != 0) {
+ ProgramProcessorRegister (
+ RegisterTable,
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
+ &mAcpiCpuData.CpuStatus,
+ &mCpuFlags
+ );
+ } else {
+ ProgramProcessorRegister (
+ RegisterTable,
+ NULL,
+ &mAcpiCpuData.CpuStatus,
+ &mCpuFlags
+ );
+ }
+}
+
+/**
AP initialization before and after SMBASE relocation in the S3 boot path.
**/
VOID
@@ -374,7 +490,7 @@ InitializeAp (
  LoadMtrrData (mAcpiCpuData.MtrrTable);
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
+ SetRegister (TRUE);
//
// Count down the number with lock mechanism.
@@ -391,7 +507,7 @@ InitializeAp (
  ProgramVirtualWireMode ();
DisableLvtInterrupts ();
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);
+ SetRegister (FALSE);
//
// Place AP into the safe code, count down the number with lock mechanism in the safe code.
@@ -466,7 +582,7 @@ InitializeCpuBeforeRebase (
{
LoadMtrrData (mAcpiCpuData.MtrrTable);
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
+ SetRegister (TRUE);
ProgramVirtualWireMode ();
@@ -502,15 +618,24 @@ InitializeCpuAfterRebase (
  VOID
)
{
- SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);
-
mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
//
- // Signal that SMM base relocation is complete and to continue initialization.
+ // Signal that SMM base relocation is complete and to continue initialization for all APs.
//
mInitApsAfterSmmBaseReloc = TRUE;
+ //
+ // Register programming must start only after all APs have continued their initialization.
+ // This is a requirement of the semaphore mechanism in the register table: if a semaphore's
+ // dependency type is package scope, the semaphore waits for all APs in one package to
+ // finish their tasks before the next register is programmed on any AP. If the APs have not
+ // started their tasks while the BSP is running its own, the BSP thread will hang, because
+ // it is waiting for the other APs in the same package to finish.
+ //
+ SetRegister (FALSE);
+
while (mNumberToFinish > 0) {
CpuPause ();
}
@@ -574,8 +699,6 @@ SmmRestoreCpu (
  mSmmS3Flag = TRUE;
- InitializeSpinLock (mMemoryMappedLock);
-
//
// See if there is enough context to resume PEI Phase
//
@@ -790,7 +913,6 @@ CopyRegisterTable (
  )
{
UINTN Index;
- UINTN Index1;
CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
@@ -802,17 +924,6 @@ CopyRegisterTable (
      );
ASSERT (RegisterTableEntry != NULL);
DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
- //
- // Go though all MSRs in register table to initialize MSR spin lock
- //
- for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
- if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
- //
- // Initialize MSR spin lock only for those MSRs need bit field writing
- //
- InitMsrSpinLockByIndex (RegisterTableEntry->Index);
- }
- }
}
}
}
@@ -832,6 +943,7 @@ GetAcpiCpuData (
  VOID *GdtForAp;
VOID *IdtForAp;
VOID *MachineCheckHandlerForAp;
+ CPU_STATUS_INFORMATION *CpuStatus;
if (!mAcpiS3Enable) {
return;
@@ -906,6 +1018,31 @@ GetAcpiCpuData (
  Gdtr->Base = (UINTN)GdtForAp;
Idtr->Base = (UINTN)IdtForAp;
mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
+
+ CpuStatus = &mAcpiCpuData.CpuStatus;
+ CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
+ if (AcpiCpuData->CpuStatus.ValidCoreCountPerPackage != 0) {
+ CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
+ sizeof (UINT32) * CpuStatus->PackageCount,
+ (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoreCountPerPackage
+ );
+ ASSERT (CpuStatus->ValidCoreCountPerPackage != 0);
+ }
+ if (AcpiCpuData->ApLocation != 0) {
+ mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
+ mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
+ );
+ ASSERT (mAcpiCpuData.ApLocation != 0);
+ }
+ if (CpuStatus->PackageCount != 0) {
+ mCpuFlags.SemaphoreCount = AllocateZeroPool (
+ sizeof (UINT32) * CpuStatus->PackageCount *
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount);
+ ASSERT (mCpuFlags.SemaphoreCount != NULL);
+ }
+ InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
+ InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.ConsoleLogLock);
}
/**
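
A side note on the CpuS3.c hunks above: the semaphore block allocated in GetAcpiCpuData holds
one UINT32 per logical-processor slot (PackageCount * MaxCoreCount * MaxThreadCount), and
ProgramProcessorRegister indexes it with a flattened (Package, Core, Thread) coordinate. A small
worked sketch of that arithmetic, assuming a hypothetical 2-package x 4-core x 2-thread topology;
the macro and function names are illustrative only:

  #include <Base.h>

  #define SKETCH_PACKAGE_COUNT     2
  #define SKETCH_MAX_CORE_COUNT    4
  #define SKETCH_MAX_THREAD_COUNT  2

  //
  // Total semaphore slots: 2 * 4 * 2 = 16 UINT32 counters, matching the
  // AllocateZeroPool size used for mCpuFlags.SemaphoreCount above.
  //
  STATIC UINT32  mSemaphoreSketch[SKETCH_PACKAGE_COUNT *
                                  SKETCH_MAX_CORE_COUNT *
                                  SKETCH_MAX_THREAD_COUNT];

  //
  // First thread of a core, as computed for CoreDepType entries:
  //   FirstThread = (Package * MaxCoreCount + Core) * MaxThreadCount
  // e.g. Package 1, Core 2 -> (1 * 4 + 2) * 2 = 12.
  //
  UINTN
  SketchFirstThreadInCore (
    IN UINTN  Package,
    IN UINTN  Core
    )
  {
    return (Package * SKETCH_MAX_CORE_COUNT + Core) * SKETCH_MAX_THREAD_COUNT;
  }

  //
  // First thread of a package, as computed for PackageDepType entries:
  //   FirstThread = Package * MaxCoreCount * MaxThreadCount
  // e.g. Package 1 -> 1 * 4 * 2 = 8.
  //
  UINTN
  SketchFirstThreadInPackage (
    IN UINTN  Package
    )
  {
    return Package * SKETCH_MAX_CORE_COUNT * SKETCH_MAX_THREAD_COUNT;
  }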
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 9cf508a5c7..42b040531e 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1303,8 +1303,6 @@ InitializeSmmCpuSemaphores (
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
= (SPIN_LOCK *)SemaphoreAddr;
SemaphoreAddr += SemaphoreSize;
- mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
- = (SPIN_LOCK *)SemaphoreAddr;
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
@@ -1321,7 +1319,6 @@ InitializeSmmCpuSemaphores (
  mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
- mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
mSemaphoreSize = SemaphoreSize;
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index 8c7f4996d1..e2970308fe 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -53,6 +53,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
 #include <Library/ReportStatusCodeLib.h>
#include <Library/SmmCpuFeaturesLib.h>
#include <Library/PeCoffGetEntryPointLib.h>
+#include <Library/RegisterCpuFeaturesLib.h>
#include <AcpiCpuData.h>
#include <CpuHotPlugData.h>
@@ -364,7 +365,6 @@ typedef struct {
  volatile BOOLEAN *AllCpusInSync;
SPIN_LOCK *PFLock;
SPIN_LOCK *CodeAccessCheckLock;
- SPIN_LOCK *MemoryMappedLock;
} SMM_CPU_SEMAPHORE_GLOBAL;
///
@@ -409,7 +409,6 @@ extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
extern UINTN mSemaphoreSize;
extern SPIN_LOCK *mPFLock;
extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
-extern SPIN_LOCK *mMemoryMappedLock;
extern EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
extern UINTN mSmmCpuSmramRangeCount;
extern UINT8 mPhysicalAddressBits;