author     Wu, Jiaxin <jiaxin.wu@intel.com>  2022-11-30 13:13:56 +0800
committer  mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>  2022-12-08 10:04:24 +0000
commit     c14c4719f9372c62d3f43c1ca3d95989c65e9d88 (patch)
tree       b662ddf249cfcd35956451d212aa621d7941ceb7 /UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
parent     4e17aba4b55003dde8de2671e968c75245fdfcae (diff)
UefiCpuPkg: Check SMM Delayed/Blocked AP Count
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4173

As core counts keep increasing, it becomes hard to reflect the state of every AP through a per-AP bitvector in a register. The SMM CPU driver does not actually need to check each AP's state to know whether all CPUs are in SMI; an alternative is to check the SMM Delayed and Blocked AP counts:

APs in SMI + Blocked Count + Disabled Count >= All supported APs
(the code comments explain why the sum can exceed the number of supported APs)

With this change, the values of "SmmRegSmmEnable", "SmmRegSmmDelayed", and "SmmRegSmmBlocked" returned from SmmCpuFeaturesLib should be AP counts within the processor's CPU package:
- For a register that reports a bitvector state, SmmCpuFeaturesGetSmmRegister() must return the count of set bits across the logical processors within the same package.
- For a register that reports an AP count, SmmCpuFeaturesGetSmmRegister() must return the register value directly.

v3:
- Refine the coding style

v2:
- Rename "mPackageBspInfo" to "mPackageFirstThreadIndex"
- Clarify the expected values of "SmmRegSmmEnable", "SmmRegSmmDelayed", and "SmmRegSmmBlocked" returned from SmmCpuFeaturesLib
- Thread: https://edk2.groups.io/g/devel/message/96722

v1:
- Thread: https://edk2.groups.io/g/devel/message/96671

Cc: Eric Dong <eric.dong@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
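To illustrate the convention described above, here is a minimal sketch of a SmmCpuFeaturesGetSmmRegister() implementation in a SmmCpuFeaturesLib instance. The function signature, the SMM_REG_NAME values, and AsmReadMsr64() are existing edk2 interfaces; the PLATFORM_MSR_* indices and the assumption that these particular registers are package-scoped are made up for illustration only, not real platform definitions.

#include <Base.h>
#include <Library/BaseLib.h>
#include <Library/SmmCpuFeaturesLib.h>

//
// Hypothetical package-scope register indices (assumptions for this sketch only).
//
#define PLATFORM_MSR_SMM_DELAYED_BITVECTOR  0x000004E0
#define PLATFORM_MSR_SMM_BLOCKED_COUNT      0x000004E1

UINT64
EFIAPI
SmmCpuFeaturesGetSmmRegister (
  IN UINTN         CpuIndex,
  IN SMM_REG_NAME  RegName
  )
{
  UINT64  BitVector;
  UINT64  Count;

  //
  // A real implementation would use CpuIndex to address the register of the
  // package that the processor belongs to; this sketch omits that detail.
  //
  switch (RegName) {
    case SmmRegSmmDelayed:
      //
      // Bitvector-style register: one bit per logical processor in the package.
      // Per the new convention, return the number of set bits, i.e. how many
      // threads in the package are delayed.
      //
      BitVector = AsmReadMsr64 (PLATFORM_MSR_SMM_DELAYED_BITVECTOR);
      Count     = 0;
      while (BitVector != 0) {
        Count     += BitVector & 1;
        BitVector >>= 1;
      }
      return Count;

    case SmmRegSmmBlocked:
      //
      // Count-style register: the hardware already reports a thread count,
      // so return the register value directly.
      //
      return AsmReadMsr64 (PLATFORM_MSR_SMM_BLOCKED_COUNT);

    default:
      return 0;
  }
}

With per-package counts returned this way, GetSmmDelayedBlockedDisabledCount() in the diff below only has to query the first thread of each package and sum the results, which is exactly what the "APs in SMI + Blocked Count + Disabled Count >= All supported APs" check consumes.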
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 195
1 file changed, 168 insertions(+), 27 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index c79da418e3..a0967eb69c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -24,6 +24,11 @@ SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
BOOLEAN mMachineCheckSupported = FALSE;
MM_COMPLETION mSmmStartupThisApToken;
+//
+// Processor specified by mPackageFirstThreadIndex[PackageIndex] will do the package-scope register check.
+//
+UINT32 *mPackageFirstThreadIndex = NULL;
+
extern UINTN mSmmShadowStackSize;
/**
@@ -157,50 +162,125 @@ ReleaseAllAPs (
}
/**
- Checks if all CPUs (with certain exceptions) have checked in for this SMI run
+ Check whether the CPU at the given index performs the package-level register
+ programming during System Management Mode initialization.
- @param Exceptions CPU Arrival exception flags.
+ The processor specified by mPackageFirstThreadIndex[PackageIndex]
+ will do the package-scope register programming.
- @retval TRUE if all CPUs the have checked in.
- @retval FALSE if at least one Normal AP hasn't checked in.
+ @param[in] CpuIndex Processor Index.
+
+ @retval TRUE Perform the package-level register programming.
+ @retval FALSE Don't perform the package-level register programming.
**/
BOOLEAN
-AllCpusInSmmWithExceptions (
- SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
+IsPackageFirstThread (
+ IN UINTN CpuIndex
)
{
- UINTN Index;
- SMM_CPU_DATA_BLOCK *CpuData;
- EFI_PROCESSOR_INFORMATION *ProcessorInfo;
+ UINT32 PackageIndex;
- ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+ PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;
- if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
- return TRUE;
+ ASSERT (mPackageFirstThreadIndex != NULL);
+
+ //
+ // Set the value of mPackageFirstThreadIndex[PackageIndex].
+ // The package-scope registers are checked by the first processor (CpuIndex) in the package.
+ //
+ // If mPackageFirstThreadIndex[PackageIndex] equals (UINT32)-1, update it
+ // to the current CpuIndex. Otherwise, leave it unchanged.
+ //
+ if (mPackageFirstThreadIndex[PackageIndex] == (UINT32)-1) {
+ mPackageFirstThreadIndex[PackageIndex] = (UINT32)CpuIndex;
}
- CpuData = mSmmMpSyncData->CpuData;
- ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
- for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
- if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
- if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {
- continue;
- }
+ return (BOOLEAN)(mPackageFirstThreadIndex[PackageIndex] == CpuIndex);
+}
+
+/**
+ Returns the number of SMM Delayed, Blocked, and Disabled threads.
+
+ @param[in,out] DelayedCount The number of SMM Delayed threads.
+ @param[in,out] BlockedCount The number of SMM Blocked threads.
+ @param[in,out] DisabledCount The number of SMM Disabled threads.
+
+**/
+VOID
+GetSmmDelayedBlockedDisabledCount (
+ IN OUT UINT32 *DelayedCount,
+ IN OUT UINT32 *BlockedCount,
+ IN OUT UINT32 *DisabledCount
+ )
+{
+ UINTN Index;
- if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {
- continue;
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (IsPackageFirstThread (Index)) {
+ if (DelayedCount != NULL) {
+ *DelayedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);
}
- if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {
- continue;
+ if (BlockedCount != NULL) {
+ *BlockedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);
}
- return FALSE;
+ if (DisabledCount != NULL) {
+ *DisabledCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);
+ }
}
}
+}
- return TRUE;
+/**
+ Checks if all CPUs (except Blocked & Disabled ones) have checked in for this SMI run.
+
+ @retval TRUE if all CPUs have checked in.
+ @retval FALSE if at least one Normal AP hasn't checked in.
+
+**/
+BOOLEAN
+AllCpusInSmmExceptBlockedDisabled (
+ VOID
+ )
+{
+ UINT32 BlockedCount;
+ UINT32 DisabledCount;
+
+ BlockedCount = 0;
+ DisabledCount = 0;
+
+ //
+ // Check to make sure mSmmMpSyncData->Counter is valid and not locked.
+ //
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ //
+ // Check whether all CPUs are in SMM.
+ //
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
+ return TRUE;
+ }
+
+ //
+ // Check for the Blocked & Disabled Exceptions Case.
+ //
+ GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);
+
+ //
+ // *mSmmMpSyncData->Counter might be updated by all APs concurrently, so its value
+ // can change at any time. If some APs enter the SMI after the BlockedCount &
+ // DisabledCount are read, *mSmmMpSyncData->Counter is increased, which can make
+ // *mSmmMpSyncData->Counter + BlockedCount + DisabledCount exceed mNumberOfCpus.
+ // Since BlockedCount & DisabledCount are local snapshots, this is acceptable for
+ // checking whether all CPUs are in SMM.
+ //
+ if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
+ return TRUE;
+ }
+
+ return FALSE;
}
/**
@@ -268,6 +348,11 @@ SmmWaitForApArrival (
UINTN Index;
BOOLEAN LmceEn;
BOOLEAN LmceSignal;
+ UINT32 DelayedCount;
+ UINT32 BlockedCount;
+
+ DelayedCount = 0;
+ BlockedCount = 0;
ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
@@ -296,7 +381,7 @@ SmmWaitForApArrival (
!IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
)
{
- mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
if (mSmmMpSyncData->AllApArrivedWithException) {
break;
}
@@ -337,7 +422,7 @@ SmmWaitForApArrival (
!IsSyncTimerTimeout (Timer);
)
{
- mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
if (mSmmMpSyncData->AllApArrivedWithException) {
break;
}
@@ -346,6 +431,14 @@ SmmWaitForApArrival (
}
}
+ if (!mSmmMpSyncData->AllApArrivedWithException) {
+ //
+ // Check for the Blocked & Delayed Case.
+ //
+ GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);
+ DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));
+ }
+
return;
}
@@ -739,6 +832,7 @@ APHandler (
if (mSmmMpSyncData->BspIndex != -1) {
//
// BSP Index is known
+ // This AP is in SMI but the BSP is not, so try to bring the BSP into SMM.
//
BspIndex = mSmmMpSyncData->BspIndex;
ASSERT (CpuIndex != BspIndex);
@@ -763,12 +857,15 @@ APHandler (
//
// Give up since BSP is unable to enter SMM
// and signal the completion of this AP
+ // Decrement mSmmMpSyncData->Counter.
+ //
WaitForSemaphore (mSmmMpSyncData->Counter);
return;
}
} else {
//
// Don't know BSP index. Give up without sending IPI to BSP.
+ // Decrement mSmmMpSyncData->Counter.
//
WaitForSemaphore (mSmmMpSyncData->Counter);
return;
@@ -1668,10 +1765,13 @@ SmiRendezvous (
} else {
//
// Signal presence of this processor
+ // mSmmMpSyncData->Counter is increased here!
+ // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means BSP has already ended the synchronization.
//
if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
//
// BSP has already ended the synchronization, so QUIT!!!
+ // This AP is too late to enter the SMI because the BSP has already ended the synchronization.
//
//
@@ -1784,6 +1884,47 @@ Exit:
}
/**
+ Initialize the mPackageFirstThreadIndex table. The processor specified by
+ mPackageFirstThreadIndex[PackageIndex] will do the package-scope register programming.
+ Each entry defaults to (UINT32)-1, which means the CpuIndex is not specified yet.
+
+**/
+VOID
+InitPackageFirstThreadIndexInfo (
+ VOID
+ )
+{
+ UINT32 Index;
+ UINT32 PackageId;
+ UINT32 PackageCount;
+
+ PackageId = 0;
+ PackageCount = 0;
+
+ //
+ // Count the number of packages: PackageCount = the maximum PackageId + 1.
+ //
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {
+ PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;
+ }
+ }
+
+ PackageCount = PackageId + 1;
+
+ mPackageFirstThreadIndex = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);
+ ASSERT (mPackageFirstThreadIndex != NULL);
+ if (mPackageFirstThreadIndex == NULL) {
+ return;
+ }
+
+ //
+ // Set default CpuIndex to (UINT32)-1, which means not specified yet.
+ //
+ SetMem32 (mPackageFirstThreadIndex, sizeof (UINT32) * PackageCount, (UINT32)-1);
+}
+
+/**
Allocate buffer for SpinLock and Wrapper function buffer.
**/