author     Michael Kinney <michael.d.kinney@intel.com>   2015-10-19 19:13:13 +0000
committer  mdkinney <mdkinney@Edk2>                      2015-10-19 19:13:13 +0000
commit     7947da3cccb5dfc973fe9ad9d814477ed978aea1 (patch)
tree       de6303c77acb21f5c627b093106d5196a65fad00 /UefiCpuPkg/PiSmmCpuDxeSmm/Ia32
parent     529a5a860996b5e83941bab50a7b8604139264a1 (diff)
UefiCpuPkg: Add PiSmmCpuDxeSmm module IA32 files
Add module that initializes a CPU for the SMM environment and installs the
first level SMI handler. This module, along with the SMM IPL and SMM Core,
provides the services required for DXE_SMM_DRIVERS to register hardware and
software SMI handlers.

CPU-specific features are abstracted through the SmmCpuFeaturesLib.
Platform-specific features are abstracted through the SmmCpuPlatformHookLib.
Several PCDs are added to enable/disable features and configure settings
for the PiSmmCpuDxeSmm module.

[jeff.fan@intel.com: Fix code style issues reported by ECC]

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Michael Kinney <michael.d.kinney@intel.com>
Reviewed-by: Jeff Fan <jeff.fan@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@18646 6f19259b-4bc3-4df7-8a09-765794883524
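
As described above, DXE_SMM_DRIVERS consume these services through the PI SMM System Table. A minimal sketch, assuming only the standard PI SMM APIs (gSmst->SmiHandlerRegister and the EFI_SMM_HANDLER_ENTRY_POINT2 signature), of how such a driver might register a root SMI handler; the driver and handler names are illustrative and not part of this patch:

  #include <PiSmm.h>
  #include <Library/SmmServicesTableLib.h>   // provides gSmst

  EFI_STATUS
  EFIAPI
  ExampleSmiHandler (
    IN     EFI_HANDLE  DispatchHandle,
    IN     CONST VOID  *Context         OPTIONAL,
    IN OUT VOID        *CommBuffer      OPTIONAL,
    IN OUT UINTN       *CommBufferSize  OPTIONAL
    )
  {
    //
    // Dispatched by the SMM Core from the first level SMI handler that
    // PiSmmCpuDxeSmm installs for each SMI.
    //
    return EFI_SUCCESS;
  }

  EFI_STATUS
  EFIAPI
  ExampleSmmDriverEntryPoint (
    IN EFI_HANDLE        ImageHandle,
    IN EFI_SYSTEM_TABLE  *SystemTable
    )
  {
    EFI_HANDLE  DispatchHandle;

    //
    // NULL registers a root handler; passing a GUID would register for a
    // specific hardware/software SMI source instead.
    //
    return gSmst->SmiHandlerRegister (ExampleSmiHandler, NULL, &DispatchHandle);
  }
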
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/Ia32')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.S          165
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm        168
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c          132
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c         48
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S         191
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm       193
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S     911
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm   738
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.S           84
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.asm         94
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c    80
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h    97
12 files changed, 2901 insertions, 0 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.S
new file mode 100644
index 0000000000..75aa312a6e
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.S
@@ -0,0 +1,165 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# MpFuncs.S
+#
+# Abstract:
+#
+# This is the assembly code for Multi-processor S3 support
+#
+#------------------------------------------------------------------------------
+
+.equ VacantFlag, 0x0
+.equ NotVacantFlag, 0xff
+
+.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+.equ StackStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x04
+.equ StackSize, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08
+.equ RendezvousProc, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x0C
+.equ GdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10
+.equ IdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x16
+.equ BufferStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x1C
+
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+#procedure serializes all the AP processors through an Init sequence. It must be
+#noted that APs arrive here very raw...ie: real mode, no stack.
+#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+#IS IN MACHINE CODE.
+#-------------------------------------------------------------------------------------
+#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+# At this point CS = 0x(vv00) and ip= 0x0.
+
+ .byte 0x8c,0xc8 # mov ax, cs
+ .byte 0x8e,0xd8 # mov ds, ax
+ .byte 0x8e,0xc0 # mov es, ax
+ .byte 0x8e,0xd0 # mov ss, ax
+ .byte 0x33,0xc0 # xor ax, ax
+ .byte 0x8e,0xe0 # mov fs, ax
+ .byte 0x8e,0xe8 # mov gs, ax
+
+flat32Start:
+
+ .byte 0xBE
+ .word BufferStart
+ .byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ .byte 0xBE
+ .word GdtrProfile
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]
+
+ .byte 0xBE
+ .word IdtrProfile
+ .byte 0x66 # db 66h
+ .byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]
+
+ .byte 0x33,0xC0 # xor ax, ax
+ .byte 0x8E,0xD8 # mov ds, ax
+
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0
+ .byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)
+ .byte 0xF,0x22,0xC0 # mov cr0, eax
+
+FLAT32_JUMP:
+
+ .byte 0x66,0x67,0xEA # far jump
+ .long 0x0 # 32-bit offset
+ .word 0x20 # 16-bit selector
+
+PMODE_ENTRY: # protected mode entry point
+
+ movw $0x8,%ax
+ .byte 0x66
+ movw %ax,%ds
+ .byte 0x66
+ movw %ax,%es
+ .byte 0x66
+ movw %ax,%fs
+ .byte 0x66
+ movw %ax,%gs
+ .byte 0x66
+ movw %ax,%ss # Flat mode setup.
+
+ movl %edx,%esi
+
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ movb $NotVacantFlag, %al
+TestLock:
+ xchgb (%edi), %al
+ cmpb $NotVacantFlag, %al
+ jz TestLock
+
+ProgramStack:
+
+ movl %esi,%edi
+ addl $StackSize, %edi
+ movl (%edi),%eax
+ movl %esi,%edi
+ addl $StackStart, %edi
+ addl (%edi),%eax
+ movl %eax,%esp
+ movl %eax,(%edi)
+
+Releaselock:
+
+ movb $VacantFlag, %al
+ movl %esi,%edi
+ addl $LockLocation, %edi
+ xchgb (%edi), %al
+
+ #
+ # Call assembly function to initialize FPU.
+ #
+ lea ASM_PFX(InitializeFloatingPointUnits), %ebx
+ call *%ebx
+ #
+ # Call C Function
+ #
+ movl %esi,%edi
+ addl $RendezvousProc, %edi
+ movl (%edi),%eax
+
+ testl %eax,%eax
+ jz GoToSleep
+ call *%eax # Call C function
+
+GoToSleep:
+ cli
+ hlt
+ jmp GoToSleep
+
+RendezvousFunnelProcEnd:
+#-------------------------------------------------------------------------------------
+# AsmGetAddressMap (&AddressMap);
+#-------------------------------------------------------------------------------------
+ASM_GLOBAL ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+
+ pushal
+ movl %esp,%ebp
+
+ movl 0x24(%ebp), %ebx
+ movl $RendezvousFunnelProcStart, (%ebx)
+ movl $(PMODE_ENTRY - RendezvousFunnelProcStart), 0x4(%ebx)
+ movl $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x8(%ebx)
+ movl $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x0c(%ebx)
+
+ popal
+ ret
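
AsmGetAddressMap() above hands back four values: the template start address, the offsets of PMODE_ENTRY and FLAT32_JUMP within it, and the template size. These are what the C side needs in order to relocate the rendezvous funnel into a below-1MB wakeup buffer for the APs. A minimal sketch, using a hypothetical MP_ADDRESS_MAP layout chosen to mirror the four stores above (the module's real structure and field names may differ):

  #include <Base.h>
  #include <Library/BaseMemoryLib.h>

  //
  // Hypothetical mirror of the four 32-bit values stored by AsmGetAddressMap().
  //
  typedef struct {
    UINT8   *RendezvousFunnelAddress;  // RendezvousFunnelProcStart
    UINT32  PModeEntryOffset;          // PMODE_ENTRY - RendezvousFunnelProcStart
    UINT32  FlatJumpOffset;            // FLAT32_JUMP - RendezvousFunnelProcStart
    UINT32  Size;                      // RendezvousFunnelProcEnd - RendezvousFunnelProcStart
  } MP_ADDRESS_MAP;

  VOID
  AsmGetAddressMap (
    OUT MP_ADDRESS_MAP  *AddressMap
    );

  VOID
  CopyRendezvousFunnelSketch (
    OUT VOID  *WakeUpBuffer   // below-1MB buffer the APs start executing from
    )
  {
    MP_ADDRESS_MAP  AddressMap;

    AsmGetAddressMap (&AddressMap);
    //
    // Copy the real-mode funnel; the exchange area addressed through the
    // LockLocation/StackStart/... offsets lives immediately after the code.
    //
    CopyMem (WakeUpBuffer, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  }
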
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm
new file mode 100644
index 0000000000..70e24a8270
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.asm
@@ -0,0 +1,168 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; MpFuncs.asm
+;
+; Abstract:
+;
+; This is the assembly code for Multi-processor S3 support
+;
+;-------------------------------------------------------------------------------
+
+.686p
+.model flat,C
+.code
+
+EXTERN InitializeFloatingPointUnits:PROC
+
+VacantFlag Equ 00h
+NotVacantFlag Equ 0ffh
+
+LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+StackStart equ LockLocation + 4h
+StackSize equ LockLocation + 8h
+RendezvousProc equ LockLocation + 0Ch
+GdtrProfile equ LockLocation + 10h
+IdtrProfile equ LockLocation + 16h
+BufferStart equ LockLocation + 1Ch
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+;procedure serializes all the AP processors through an Init sequence. It must be
+;noted that APs arrive here very raw...ie: real mode, no stack.
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
+;IS IN MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+RendezvousFunnelProc PROC near C PUBLIC
+RendezvousFunnelProcStart::
+
+; At this point CS = 0x(vv00) and ip= 0x0.
+
+ db 8ch, 0c8h ; mov ax, cs
+ db 8eh, 0d8h ; mov ds, ax
+ db 8eh, 0c0h ; mov es, ax
+ db 8eh, 0d0h ; mov ss, ax
+ db 33h, 0c0h ; xor ax, ax
+ db 8eh, 0e0h ; mov fs, ax
+ db 8eh, 0e8h ; mov gs, ax
+
+flat32Start::
+
+ db 0BEh
+ dw BufferStart ; mov si, BufferStart
+ db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
+
+ db 0BEh
+ dw GdtrProfile ; mov si, GdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]
+
+ db 0BEh
+ dw IdtrProfile ; mov si, IdtrProfile
+ db 66h ; db 66h
+ db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]
+
+ db 33h, 0C0h ; xor ax, ax
+ db 8Eh, 0D8h ; mov ds, ax
+
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0
+ db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)
+ db 0Fh, 22h, 0C0h ; mov cr0, eax
+
+FLAT32_JUMP::
+
+ db 66h, 67h, 0EAh ; far jump
+ dd 0h ; 32-bit offset
+ dw 20h ; 16-bit selector
+
+PMODE_ENTRY:: ; protected mode entry point
+
+ mov ax, 8h
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax ; Flat mode setup.
+
+ mov esi, edx
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock::
+ xchg byte ptr [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack::
+
+ mov edi, esi
+ add edi, StackSize
+ mov eax, dword ptr [edi]
+ mov edi, esi
+ add edi, StackStart
+ add eax, dword ptr [edi]
+ mov esp, eax
+ mov dword ptr [edi], eax
+
+Releaselock::
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte ptr [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov ebx, InitializeFloatingPointUnits
+ call ebx
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, RendezvousProc
+ mov eax, dword ptr [edi]
+
+ test eax, eax
+ jz GoToSleep
+ call eax ; Call C function
+
+GoToSleep::
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProc ENDP
+RendezvousFunnelProcEnd::
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+AsmGetAddressMap PROC near C PUBLIC
+
+ pushad
+ mov ebp,esp
+
+ mov ebx, dword ptr [ebp+24h]
+ mov dword ptr [ebx], RendezvousFunnelProcStart
+ mov dword ptr [ebx+4h], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov dword ptr [ebx+8h], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov dword ptr [ebx+0ch], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+
+ popad
+ ret
+
+AsmGetAddressMap ENDP
+
+END
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
new file mode 100644
index 0000000000..edebaabb47
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -0,0 +1,132 @@
+/** @file
+Page table manipulation functions for IA-32 processors
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+SPIN_LOCK mPFLock;
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (&mPFLock);
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ //
+ // Set own Page Fault entry instead of the default one, because SMM Profile
+ // feature depends on IRET instruction to do Single Step
+ //
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ } else {
+ //
+ // Register SMM Page Fault Handler
+ //
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+ }
+
+ //
+ // Additional SMM IDT initialization for SMM stack guard
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ InitializeIDTSmmStackGuard ();
+ }
+ return Gen4GPageTable (0);
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ CpuDeadLoop ();
+}
+
+/**
+ The Page Fault handler wrapper for SMM use.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (&mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ CpuDeadLoop ();
+ }
+
+ //
+ // If a page fault occurs in SMM range
+ //
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
+ );
+ CpuDeadLoop ();
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextIa32->Eip,
+ SystemContext.SystemContextIa32->ExceptionData
+ );
+ } else {
+ SmiDefaultPFHandler ();
+ }
+
+ ReleaseSpinLock (&mPFLock);
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
new file mode 100644
index 0000000000..02a866b430
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
@@ -0,0 +1,48 @@
+/** @file
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+after SMBASE relocation.
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINTN mSmmRelocationOriginalAddress;
+volatile BOOLEAN *mRebasedFlag;
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuState;
+
+ mRebasedFlag = RebasedFlag;
+
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+ mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
+ );
+}
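
A minimal BSP-side usage sketch of the hook above; the flag variable, the spin loop, and the point where the relocation SMI is triggered are illustrative assumptions, not code from this patch:

  #include <PiSmm.h>
  #include <Library/BaseLib.h>   // CpuPause()

  //
  // Prototype of the function defined above.
  //
  VOID
  SemaphoreHook (
    IN UINTN             CpuIndex,
    IN volatile BOOLEAN  *RebasedFlag
    );

  VOID
  WaitForApRebaseSketch (
    IN UINTN  CpuIndex
    )
  {
    volatile BOOLEAN  Rebased;

    Rebased = FALSE;
    //
    // Arrange for SmmRelocationSemaphoreComplete to run as the AP leaves SMM;
    // it sets *mRebasedFlag (here, Rebased) to TRUE.
    //
    SemaphoreHook (CpuIndex, &Rebased);

    // ... the BSP triggers the SMBASE relocation SMI for this AP here ...

    while (!Rebased) {
      CpuPause ();               // spin until the AP has exited SMM
    }
  }
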
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
new file mode 100644
index 0000000000..6fcf41a677
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
@@ -0,0 +1,191 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiEntry.S
+#
+# Abstract:
+#
+# Code template of the SMI handler for a particular processor
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gSmiCr3)
+ASM_GLOBAL ASM_PFX(gSmiStack)
+ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
+
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x30
+.equ DSC_GDTSIZ, 0x38
+.equ DSC_CS, 14
+.equ DSC_DS, 16
+.equ DSC_SS, 18
+.equ DSC_OTHERSEG, 20
+
+.equ PROTECT_MODE_CS, 0x08
+.equ PROTECT_MODE_DS, 0x20
+.equ TSS_SEGMENT, 0x40
+
+ .text
+
+ASM_PFX(gcSmiHandlerTemplate):
+
+_SmiEntryPoint:
+ .byte 0xbb # mov bx, imm16
+ .word _GdtDesc - _SmiEntryPoint + 0x8000
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTSIZ
+ decl %eax
+ movl %eax, %cs:(%edi) # mov cs:[bx], ax
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
+ .word DSC_OFFSET + DSC_GDTPTR
+ movw %ax, %cs:2(%edi)
+ movw %ax, %bp # ebp = GDT base
+ .byte 0x66
+ lgdt %cs:(%edi)
+# Patch ProtectedMode Segment
+ .byte 0xb8 # mov ax, imm16
+ .word PROTECT_MODE_CS # set AX for segment directly
+ movl %eax, %cs:-2(%edi) # mov cs:[bx - 2], ax
+# Patch ProtectedMode entry
+ .byte 0x66, 0xbf # mov edi, SMBASE
+ASM_PFX(gSmbase): .space 4
+ .byte 0x67
+ lea ((Start32bit - _SmiEntryPoint) + 0x8000)(%edi), %ax
+ movw %ax, %cs:-6(%edi)
+ movl %cr0, %ebx
+ .byte 0x66
+ andl $0x9ffafff3, %ebx
+ .byte 0x66
+ orl $0x23, %ebx
+ movl %ebx, %cr0
+ .byte 0x66,0xea
+ .space 4
+ .space 2
+_GdtDesc: .space 4
+ .space 2
+
+Start32bit:
+ movw $PROTECT_MODE_DS, %ax
+ movl %eax,%ds
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+ movl %eax,%ss
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gSmiStack): .space 4
+ movl $ASM_PFX(gSmiHandlerIdtr), %eax
+ lidt (%eax)
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ .byte 0xb8 # mov eax, imm32
+ASM_PFX(gSmiCr3): .space 4
+ movl %eax, %cr3
+#
+# Need to test for CR4 specific bit support
+#
+ movl $1, %eax
+ cpuid # use CPUID to determine if specific CR4 bits are supported
+ xorl %eax, %eax # Clear EAX
+ testl $BIT2, %edx # Check for DE capabilities
+ jz L8
+ orl $BIT3, %eax
+L8:
+ testl $BIT6, %edx # Check for PAE capabilities
+ jz L9
+ orl $BIT5, %eax
+L9:
+ testl $BIT7, %edx # Check for MCE capabilities
+ jz L10
+ orl $BIT6, %eax
+L10:
+ testl $BIT24, %edx # Check for FXSR capabilities
+ jz L11
+ orl $BIT9, %eax
+L11:
+ testl $BIT25, %edx # Check for SSE capabilities
+ jz L12
+ orl $BIT10, %eax
+L12: # as cr4.PGE is not set here, refresh cr3
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+ movl %cr0, %ebx
+ orl $0x080000000, %ebx # enable paging
+ movl %ebx, %cr0
+ leal DSC_OFFSET(%edi),%ebx
+ movw DSC_DS(%ebx),%ax
+ movl %eax, %ds
+ movw DSC_OTHERSEG(%ebx),%ax
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movw DSC_SS(%ebx),%ax
+ movl %eax, %ss
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L5
+
+# Load TSS
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltrw %ax
+L5:
+
+# jmp _SmiHandler # instruction is not needed
+
+_SmiHandler:
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ jz L3
+
+L6:
+ call L1
+L1:
+ popl %ebp
+ movl $0x80000001, %eax
+ cpuid
+ btl $29, %edx # check cpuid to identify X64 or IA32
+ leal (0x7fc8 - (L1 - _SmiEntryPoint))(%ebp), %edi
+ leal 4(%edi), %esi
+ jnc L2
+ addl $4, %esi
+L2:
+ movl (%esi), %ecx
+ movl (%edi), %edx
+L7:
+ movl %ecx, %dr6
+ movl %edx, %dr7 # restore DR6 & DR7 before running C code
+L3:
+
+ pushl (%esp)
+
+ movl $ASM_PFX(SmiRendezvous), %eax
+ call *%eax
+ popl %ecx
+
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))
+ jz L4
+
+ movl %dr6, %ecx
+ movl %dr7, %edx
+ movl %ecx, (%esi)
+ movl %edx, (%edi)
+L4:
+
+ rsm
+
+ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
new file mode 100644
index 0000000000..b628fe8bd8
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
@@ -0,0 +1,193 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiEntry.asm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+ .686p
+ .model flat,C
+ .xmm
+
+DSC_OFFSET EQU 0fb00h
+DSC_GDTPTR EQU 30h
+DSC_GDTSIZ EQU 38h
+DSC_CS EQU 14
+DSC_DS EQU 16
+DSC_SS EQU 18
+DSC_OTHERSEG EQU 20
+
+PROTECT_MODE_CS EQU 08h
+PROTECT_MODE_DS EQU 20h
+TSS_SEGMENT EQU 40h
+
+SmiRendezvous PROTO C
+
+EXTERNDEF gcSmiHandlerTemplate:BYTE
+EXTERNDEF gcSmiHandlerSize:WORD
+EXTERNDEF gSmiCr3:DWORD
+EXTERNDEF gSmiStack:DWORD
+EXTERNDEF gSmbase:DWORD
+EXTERNDEF FeaturePcdGet (PcdCpuSmmDebug):BYTE
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
+EXTERNDEF gSmiHandlerIdtr:FWORD
+
+ .code
+
+gcSmiHandlerTemplate LABEL BYTE
+
+_SmiEntryPoint:
+ DB 0bbh ; mov bx, imm16
+ DW offset _GdtDesc - _SmiEntryPoint + 8000h
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTSIZ
+ dec eax
+ mov cs:[edi], eax ; mov cs:[bx], ax
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
+ DW DSC_OFFSET + DSC_GDTPTR
+ mov cs:[edi + 2], ax ; mov cs:[bx + 2], eax
+ mov bp, ax ; ebp = GDT base
+ DB 66h
+ lgdt fword ptr cs:[edi] ; lgdt fword ptr cs:[bx]
+; Patch ProtectedMode Segment
+ DB 0b8h ; mov ax, imm16
+ DW PROTECT_MODE_CS ; set AX for segment directly
+ mov cs:[edi - 2], eax ; mov cs:[bx - 2], ax
+; Patch ProtectedMode entry
+ DB 66h, 0bfh ; mov edi, SMBASE
+gSmbase DD ?
+ DB 67h
+ lea ax, [edi + (@32bit - _SmiEntryPoint) + 8000h]
+ mov cs:[edi - 6], ax ; mov cs:[bx - 6], eax
+ mov ebx, cr0
+ DB 66h
+ and ebx, 9ffafff3h
+ DB 66h
+ or ebx, 23h
+ mov cr0, ebx
+ DB 66h, 0eah
+ DD ?
+ DW ?
+_GdtDesc FWORD ?
+
+@32bit:
+ mov ax, PROTECT_MODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
+ DB 0bch ; mov esp, imm32
+gSmiStack DD ?
+ mov eax, offset gSmiHandlerIdtr
+ lidt fword ptr [eax]
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ DB 0b8h ; mov eax, imm32
+gSmiCr3 DD ?
+ mov cr3, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ xor eax, eax ; Clear EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz @f
+ or eax, BIT3
+@@:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz @f
+ or eax, BIT5
+@@:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz @f
+ or eax, BIT6
+@@:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz @f
+ or eax, BIT9
+@@:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz @f
+ or eax, BIT10
+@@: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+ mov ebx, cr0
+ or ebx, 080000000h ; enable paging
+ mov cr0, ebx
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+
+; Load TSS
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+@@:
+; jmp _SmiHandler ; instruction is not needed
+
+_SmiHandler PROC
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0
+ jz @3
+ call @1
+@1:
+ pop ebp
+ mov eax, 80000001h
+ cpuid
+ bt edx, 29 ; check cpuid to identify X64 or IA32
+ lea edi, [ebp - (@1 - _SmiEntryPoint) + 7fc8h]
+ lea esi, [edi + 4]
+ jnc @2
+ add esi, 4
+@2:
+ mov ecx, [esi]
+ mov edx, [edi]
+@5:
+ mov dr6, ecx
+ mov dr7, edx ; restore DR6 & DR7 before running C code
+@3:
+ mov ecx, [esp] ; CPU Index
+
+ push ecx
+ mov eax, SmiRendezvous
+ call eax
+ pop ecx
+
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0
+ jz @4
+
+ mov ecx, dr6
+ mov edx, dr7
+ mov [esi], ecx
+ mov [edi], edx
+@4:
+ rsm
+_SmiHandler ENDP
+
+gcSmiHandlerSize DW $ - _SmiEntryPoint
+
+ END
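
Both SmiEntry flavors above are a patchable template: gSmbase, gSmiStack and gSmiCr3 are data slots embedded in the instruction stream, and the code assumes it executes at offset 0x8000 from the CPU's SMBASE (the architectural SMI entry point). A minimal sketch of how the template might be stamped out per CPU, assuming the patch-then-copy order shown here; the module's actual install routine may differ:

  #include <Base.h>
  #include <Library/BaseMemoryLib.h>

  //
  // Symbols exported by SmiEntry.S / SmiEntry.asm above.
  //
  extern UINT8   gcSmiHandlerTemplate[];
  extern UINT16  gcSmiHandlerSize;
  extern UINT32  gSmbase;
  extern UINT32  gSmiStack;
  extern UINT32  gSmiCr3;

  VOID
  InstallSmiHandlerSketch (
    IN UINT32  SmBase,    // this CPU's SMBASE after relocation
    IN UINT32  SmiStack,  // top of this CPU's SMI stack
    IN UINT32  Cr3        // SMM page table, e.g. from SmmInitPageTable()
    )
  {
    //
    // Patch the per-CPU slots inside the template, then copy the patched
    // template to the architectural SMI entry point at SMBASE + 0x8000.
    //
    gSmbase   = SmBase;
    gSmiStack = SmiStack;
    gSmiCr3   = Cr3;
    CopyMem (
      (VOID *)(UINTN)(SmBase + 0x8000),
      gcSmiHandlerTemplate,
      gcSmiHandlerSize
      );
  }
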
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
new file mode 100644
index 0000000000..69dfd946de
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
@@ -0,0 +1,911 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmiException.S
+#
+# Abstract:
+#
+# Exception handlers used in SM mode
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(SmiPFHandler)
+ASM_GLOBAL ASM_PFX(PageFaultStubFunction)
+ASM_GLOBAL ASM_PFX(gSmiMtrrs)
+ASM_GLOBAL ASM_PFX(gcSmiIdtr)
+ASM_GLOBAL ASM_PFX(gcSmiGdtr)
+ASM_GLOBAL ASM_PFX(gcPsd)
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+
+ .data
+
+NullSeg: .quad 0 # reserved by architecture
+CodeSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+ProtModeCodeSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+ProtModeSsSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x93
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+DataSeg32:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x93
+ .byte 0xcf # LimitHigh
+ .byte 0 # BaseHigh
+CodeSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x9b
+ .byte 0x8f
+ .byte 0
+DataSeg16:
+ .word -1
+ .word 0
+ .byte 0
+ .byte 0x93
+ .byte 0x8f
+ .byte 0
+CodeSeg64:
+ .word -1 # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x9b
+ .byte 0xaf # LimitHigh
+ .byte 0 # BaseHigh
+.equ GDT_SIZE, .- NullSeg
+
+TssSeg:
+ .word TSS_DESC_SIZE # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x89
+ .byte 0x80 # LimitHigh
+ .byte 0 # BaseHigh
+ExceptionTssSeg:
+ .word TSS_DESC_SIZE # LimitLow
+ .word 0 # BaseLow
+ .byte 0 # BaseMid
+ .byte 0x89
+ .byte 0x80 # LimitHigh
+ .byte 0 # BaseHigh
+
+.equ CODE_SEL, CodeSeg32 - NullSeg
+.equ DATA_SEL, DataSeg32 - NullSeg
+.equ TSS_SEL, TssSeg - NullSeg
+.equ EXCEPTION_TSS_SEL, ExceptionTssSeg - NullSeg
+
+# IA32 TSS fields
+.equ TSS_ESP0, 4
+.equ TSS_SS0, 8
+.equ TSS_ESP1, 12
+.equ TSS_SS1, 16
+.equ TSS_ESP2, 20
+.equ TSS_SS2, 24
+.equ TSS_CR3, 28
+.equ TSS_EIP, 32
+.equ TSS_EFLAGS, 36
+.equ TSS_EAX, 40
+.equ TSS_ECX, 44
+.equ TSS_EDX, 48
+.equ TSS_EBX, 52
+.equ TSS_ESP, 56
+.equ TSS_EBP, 60
+.equ TSS_ESI, 64
+.equ TSS_EDI, 68
+.equ TSS_ES, 72
+.equ TSS_CS, 76
+.equ TSS_SS, 80
+.equ TSS_DS, 84
+.equ TSS_FS, 88
+.equ TSS_GS, 92
+.equ TSS_LDT, 96
+
+# Create 2 TSS segments just after GDT
+TssDescriptor:
+ .word 0 # PreviousTaskLink
+ .word 0 # Reserved
+ .long 0 # ESP0
+ .word 0 # SS0
+ .word 0 # Reserved
+ .long 0 # ESP1
+ .word 0 # SS1
+ .word 0 # Reserved
+ .long 0 # ESP2
+ .word 0 # SS2
+ .word 0 # Reserved
+ .long 0 # CR3
+ .long 0 # EIP
+ .long 0 # EFLAGS
+ .long 0 # EAX
+ .long 0 # ECX
+ .long 0 # EDX
+ .long 0 # EBX
+ .long 0 # ESP
+ .long 0 # EBP
+ .long 0 # ESI
+ .long 0 # EDI
+ .word 0 # ES
+ .word 0 # Reserved
+ .word 0 # CS
+ .word 0 # Reserved
+ .word 0 # SS
+ .word 0 # Reserved
+ .word 0 # DS
+ .word 0 # Reserved
+ .word 0 # FS
+ .word 0 # Reserved
+ .word 0 # GS
+ .word 0 # Reserved
+ .word 0 # LDT Selector
+ .word 0 # Reserved
+ .word 0 # T
+ .word 0 # I/O Map Base
+.equ TSS_DESC_SIZE, . - TssDescriptor
+
+ExceptionTssDescriptor:
+ .word 0 # PreviousTaskLink
+ .word 0 # Reserved
+ .long 0 # ESP0
+ .word 0 # SS0
+ .word 0 # Reserved
+ .long 0 # ESP1
+ .word 0 # SS1
+ .word 0 # Reserved
+ .long 0 # ESP2
+ .word 0 # SS2
+ .word 0 # Reserved
+ .long 0 # CR3
+ .long PFHandlerEntry # EIP
+ .long 00000002 # EFLAGS
+ .long 0 # EAX
+ .long 0 # ECX
+ .long 0 # EDX
+ .long 0 # EBX
+ .long 0 # ESP
+ .long 0 # EBP
+ .long 0 # ESI
+ .long 0 # EDI
+ .word DATA_SEL # ES
+ .word 0 # Reserved
+ .word CODE_SEL # CS
+ .word 0 # Reserved
+ .word DATA_SEL # SS
+ .word 0 # Reserved
+ .word DATA_SEL # DS
+ .word 0 # Reserved
+ .word DATA_SEL # FS
+ .word 0 # Reserved
+ .word DATA_SEL # GS
+ .word 0 # Reserved
+ .word 0 # LDT Selector
+ .word 0 # Reserved
+ .word 0 # T
+ .word 0 # I/O Map Base
+
+ASM_PFX(gcPsd):
+ .ascii "PSDSIG "
+ .word PSD_SIZE
+ .word 2
+ .word 1 << 2
+ .word CODE_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word DATA_SEL
+ .word 0
+ .long 0
+ .long 0
+ .long 0
+ .long 0
+ .quad 0
+ .long NullSeg
+ .long 0
+ .long GDT_SIZE
+ .long 0
+ .space 24, 0
+ .long ASM_PFX(gSmiMtrrs)
+ .long 0
+.equ PSD_SIZE, . - ASM_PFX(gcPsd)
+
+ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1
+ .long NullSeg
+
+ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1
+ .long _SmiIDT
+
+_SmiIDT:
+# The following segment repeats 32 times:
+# No. 1
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 2
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 3
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 4
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 5
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 6
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 7
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 8
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 9
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 10
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 11
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 12
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 13
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 14
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 15
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 16
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 17
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 18
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 19
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 20
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 21
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 22
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 23
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 24
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 25
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 26
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 27
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 28
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 29
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 30
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 31
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+# No. 32
+ .word 0 # Offset 0:15
+ .word CODE_SEL
+ .byte 0 # Unused
+ .byte 0x8e # Interrupt Gate, Present
+ .word 0 # Offset 16:31
+
+.equ IDT_SIZE, . - _SmiIDT
+
+TaskGateDescriptor:
+ .word 0 # Reserved
+ .word EXCEPTION_TSS_SEL # TSS Segment selector
+ .byte 0 # Reserved
+ .byte 0x85 # Task Gate, present, DPL = 0
+ .word 0 # Reserved
+
+ .text
+
+#------------------------------------------------------------------------------
+# PageFaultIdtHandlerSmmProfile is the entry point for all exceptions
+#
+# Stack:
+#+---------------------+
+#+ EFlags +
+#+---------------------+
+#+ CS +
+#+---------------------+
+#+ EIP +
+#+---------------------+
+#+ Error Code +
+#+---------------------+
+#+ Vector Number +
+#+---------------------+
+#+ EBP +
+#+---------------------+ <-- EBP
+#
+# RSP set to odd multiple of 8 means ErrCode PRESENT
+#------------------------------------------------------------------------------
+ASM_GLOBAL ASM_PFX(PageFaultIdtHandlerSmmProfile)
+ASM_PFX(PageFaultIdtHandlerSmmProfile):
+ pushl $0x0e # Page Fault
+ pushl %ebp
+ movl %esp, %ebp
+
+
+ #
+ # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ # is 16-byte aligned
+ #
+ andl $0xfffffff0, %esp
+ subl $12, %esp
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushl %ebx
+ leal (6*4)(%ebp), %ecx
+ pushl %ecx # ESP
+ pushl (%ebp) # EBP
+ pushl %esi
+ pushl %edi
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movl %ss, %eax
+ pushl %eax
+ movzwl (4*4)(%ebp), %eax
+ pushl %eax
+ movl %ds, %eax
+ pushl %eax
+ movl %es, %eax
+ pushl %eax
+ movl %fs, %eax
+ pushl %eax
+ movl %gs, %eax
+ pushl %eax
+
+## UINT32 Eip;
+ movl (3*4)(%ebp), %eax
+ pushl %eax
+
+## UINT32 Gdtr[2], Idtr[2];
+ subl $8, %esp
+ sidt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xffff, %eax
+ movl %eax, 4(%esp)
+
+ subl $8, %esp
+ sgdt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xffff, %eax
+ movl %eax, 4(%esp)
+
+## UINT32 Ldtr, Tr;
+ xorl %eax, %eax
+ strw %ax
+ pushl %eax
+ sldtw %ax
+ pushl %eax
+
+## UINT32 EFlags;
+ movl (5*4)(%ebp), %eax
+ pushl %eax
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ movl %cr4, %eax
+ orl $0x208, %eax
+ movl %eax, %cr4
+ pushl %eax
+ movl %cr3, %eax
+ pushl %eax
+ movl %cr2, %eax
+ pushl %eax
+ xorl %eax, %eax
+ pushl %eax
+ movl %cr0, %eax
+ pushl %eax
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ movl %dr7, %eax
+ pushl %eax
+ movl %dr6, %eax
+ pushl %eax
+ movl %dr3, %eax
+ pushl %eax
+ movl %dr2, %eax
+ pushl %eax
+ movl %dr1, %eax
+ pushl %eax
+ movl %dr0, %eax
+ pushl %eax
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ subl $512, %esp
+ movl %esp, %edi
+ .byte 0x0f, 0xae, 0x07 #fxsave [edi]
+
+# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+## UINT32 ExceptionData;
+ pushl (2*4)(%ebp)
+
+## call into exception handler
+
+## Prepare parameter and call
+ movl %esp, %edx
+ pushl %edx
+ movl (1*4)(%ebp), %edx
+ pushl %edx
+
+ #
+ # Call External Exception Handler
+ #
+ movl $ASM_PFX(SmiPFHandler), %eax
+ call *%eax
+ addl $8, %esp
+ jmp L4
+
+L4:
+## UINT32 ExceptionData;
+ addl $4, %esp
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ movl %esp, %esi
+ .byte 0xf, 0xae, 0xe # fxrstor [esi]
+ addl $512, %esp
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+## Skip restoration of DRx registers to support debuggers
+## that set breakpoints in interrupt/exception context
+ addl $4*6, %esp
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ popl %eax
+ movl %eax, %cr0
+ addl $4, %esp # not for Cr1
+ popl %eax
+ movl %eax, %cr2
+ popl %eax
+ movl %eax, %cr3
+ popl %eax
+ movl %eax, %cr4
+
+## UINT32 EFlags;
+ popl (5*4)(%ebp)
+
+## UINT32 Ldtr, Tr;
+## UINT32 Gdtr[2], Idtr[2];
+## Best not let anyone mess with these particular registers...
+ addl $24, %esp
+
+## UINT32 Eip;
+ popl (3*4)(%ebp)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+## NOTE - modified segment registers could hang the debugger... We
+## could attempt to insulate ourselves against this possibility,
+## but that poses risks as well.
+##
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+ popl (4*4)(%ebp)
+ popl %ss
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ popl %edi
+ popl %esi
+ addl $4, %esp # not for ebp
+ addl $4, %esp # not for esp
+ popl %ebx
+ popl %edx
+ popl %ecx
+ popl %eax
+
+ movl %ebp, %esp
+ popl %ebp
+
+# Enable TF bit after page fault handler runs
+ btsl $8, 16(%esp) # EFLAGS
+
+ addl $8, %esp # skip INT# & ErrCode
+Return:
+ iret
+#
+# Page Fault Exception Handler entry when SMM Stack Guard is enabled
+# Execution starts here after a task switch
+#
+PFHandlerEntry:
+#
+# Get this processor's TSS
+#
+ subl $8, %esp
+ sgdt 2(%esp)
+ movl 4(%esp), %eax # GDT base
+ addl $8, %esp
+ movl (TSS_SEL+2)(%eax), %ecx
+ shll $8, %ecx
+ movb (TSS_SEL+7)(%eax), %cl
+ rorl $8, %ecx # ecx = TSS base
+
+ movl %esp, %ebp
+
+ #
+ # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ # is 16-byte aligned
+ #
+ andl $0xfffffff0, %esp
+ subl $12, %esp
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pushl TSS_EAX(%ecx)
+ pushl TSS_ECX(%ecx)
+ pushl TSS_EDX(%ecx)
+ pushl TSS_EBX(%ecx)
+ pushl TSS_ESP(%ecx)
+ pushl TSS_EBP(%ecx)
+ pushl TSS_ESI(%ecx)
+ pushl TSS_EDI(%ecx)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzwl TSS_SS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_CS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_DS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_ES(%ecx), %eax
+ pushl %eax
+ movzwl TSS_FS(%ecx), %eax
+ pushl %eax
+ movzwl TSS_GS(%ecx), %eax
+ pushl %eax
+
+## UINT32 Eip;
+ pushl TSS_EIP(%ecx)
+
+## UINT32 Gdtr[2], Idtr[2];
+ subl $8, %esp
+ sidt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xFFFF, %eax
+ movl %eax, 4(%esp)
+
+ subl $8, %esp
+ sgdt (%esp)
+ movl 2(%esp), %eax
+ xchgl (%esp), %eax
+ andl $0xFFFF, %eax
+ movl %eax, 4(%esp)
+
+## UINT32 Ldtr, Tr;
+ movl $TSS_SEL, %eax
+ pushl %eax
+ movzwl TSS_LDT(%ecx), %eax
+ pushl %eax
+
+## UINT32 EFlags;
+ pushl TSS_EFLAGS(%ecx)
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ movl %cr4, %eax
+ orl $0x208, %eax
+ movl %eax, %cr4
+ pushl %eax
+ movl %cr3, %eax
+ pushl %eax
+ movl %cr2, %eax
+ pushl %eax
+ xorl %eax, %eax
+ pushl %eax
+ movl %cr0, %eax
+ pushl %eax
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ movl %dr7, %eax
+ pushl %eax
+ movl %dr6, %eax
+ pushl %eax
+ movl %dr3, %eax
+ pushl %eax
+ movl %dr2, %eax
+ pushl %eax
+ movl %dr1, %eax
+ pushl %eax
+ movl %dr0, %eax
+ pushl %eax
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+## Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
+## when executing fxsave/fxrstor instruction
+ clts
+ subl $512, %esp
+ movl %esp, %edi
+ .byte 0x0f, 0xae, 0x07 #fxsave [edi]
+
+# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+## UINT32 ExceptionData;
+ pushl (%ebp)
+
+## call into exception handler
+ movl %ecx, %ebx
+ movl $ASM_PFX(SmiPFHandler), %eax
+
+## Prepare parameter and call
+ movl %esp, %edx
+ pushl %edx
+ movl $14, %edx
+ pushl %edx
+
+ #
+ # Call External Exception Handler
+ #
+ call *%eax
+ addl $8, %esp
+
+ movl %ebx, %ecx
+## UINT32 ExceptionData;
+ addl $4, %esp
+
+## FX_SAVE_STATE_IA32 FxSaveState;
+ movl %esp, %esi
+ .byte 0xf, 0xae, 0xe # fxrstor [esi]
+ addl $512, %esp
+
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+## Skip restoration of DRx registers to support debuggers
+## that set breakpoints in interrupt/exception context
+ addl $4*6, %esp
+
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ popl %eax
+ movl %eax, %cr0
+ addl $4, %esp # not for Cr1
+ popl %eax
+ movl %eax, %cr2
+ popl %eax
+ movl %eax, TSS_CR3(%ecx)
+ popl %eax
+ movl %eax, %cr4
+
+## UINT32 EFlags;
+ popl TSS_EFLAGS(%ecx)
+
+## UINT32 Ldtr, Tr;
+## UINT32 Gdtr[2], Idtr[2];
+## Best not let anyone mess with these particular registers...
+ addl $24, %esp
+
+## UINT32 Eip;
+ popl TSS_EIP(%ecx)
+
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+## NOTE - modified segment registers could hang the debugger... We
+## could attempt to insulate ourselves against this possibility,
+## but that poses risks as well.
+##
+ popl %eax
+ movw %ax, TSS_GS(%ecx)
+ popl %eax
+ movw %ax, TSS_FS(%ecx)
+ popl %eax
+ movw %ax, TSS_ES(%ecx)
+ popl %eax
+ movw %ax, TSS_DS(%ecx)
+ popl %eax
+ movw %ax, TSS_CS(%ecx)
+ popl %eax
+ movw %ax, TSS_SS(%ecx)
+
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ popl TSS_EDI(%ecx)
+ popl TSS_ESI(%ecx)
+ addl $4, %esp # not for ebp
+ addl $4, %esp # not for esp
+ popl TSS_EBX(%ecx)
+ popl TSS_EDX(%ecx)
+ popl TSS_ECX(%ecx)
+ popl TSS_EAX(%ecx)
+
+ movl %ebp, %esp
+
+# Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+ jz Done2
+# Create return context for iret in stub function
+ movl TSS_ESP(%ecx), %eax # Get old stack pointer
+ movl TSS_EIP(%ecx), %ebx
+ movl %ebx, -0xc(%eax) # create EIP in old stack
+ movzwl TSS_CS(%ecx), %ebx
+ movl %ebx, -0x8(%eax) # create CS in old stack
+ movl TSS_EFLAGS(%ecx), %ebx
+ btsl $8,%ebx
+ movl %ebx, -0x4(%eax) # create eflags in old stack
+ movl TSS_ESP(%ecx), %eax # Get old stack pointer
+ subl $12, %eax # minus 12 byte
+ movl %eax, TSS_ESP(%ecx) # Set new stack pointer
+
+# Replace the EIP of interrupted task with stub function
+ movl $ASM_PFX(PageFaultStubFunction), %eax
+ movl %eax, TSS_EIP(%ecx)
+# Jump to the iret so next page fault handler as a task will start again after iret.
+
+Done2:
+
+ addl $4, %esp # skip ErrCode
+
+ jmp Return
+
+ASM_PFX(PageFaultStubFunction):
+#
+# we need clean TS bit in CR0 to execute
+# x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
+#
+ clts
+ iret
+
+ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
+ASM_PFX(InitializeIDTSmmStackGuard):
+ pushl %ebx
+#
+# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+# the processors can use a known good stack in case the stack runs out.
+#
+ leal _SmiIDT + 14 * 8, %ebx
+ leal TaskGateDescriptor, %edx
+ movl (%edx), %eax
+ movl %eax, (%ebx)
+ movl 4(%edx), %eax
+ movl %eax, 4(%ebx)
+
+ popl %ebx
+ ret
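
InitializeIDTSmmStackGuard above copies the 8-byte TaskGateDescriptor over IDT vector 14, so a page fault performs a task switch onto the known-good stack recorded in the exception TSS. An equivalent sketch in C, reusing the IA32_IDT_GATE_DESCRIPTOR access pattern from PageTbl.c in this patch; the selector parameter and the assumption that gcSmiIdtr is declared as an IA32_DESCRIPTOR are illustrative:

  #include <Base.h>
  #include <Library/BaseLib.h>

  extern IA32_DESCRIPTOR  gcSmiIdtr;   // SMM IDT descriptor defined above

  VOID
  InitializeIdtStackGuardSketch (
    IN UINT16  ExceptionTssSelector    // EXCEPTION_TSS_SEL from the GDT above
    )
  {
    IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;

    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;          // vector 14

    IdtEntry->Bits.OffsetLow  = 0;               // offset is unused by a task gate
    IdtEntry->Bits.Selector   = ExceptionTssSelector;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType   = 0x85;            // task gate, present, DPL 0 (85h above)
    IdtEntry->Bits.OffsetHigh = 0;
  }
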
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
new file mode 100644
index 0000000000..65a120e1e7
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
@@ -0,0 +1,738 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmiException.asm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+ .686p
+ .model flat,C
+
+EXTERNDEF SmiPFHandler:PROC
+EXTERNDEF PageFaultStubFunction:PROC
+EXTERNDEF gSmiMtrrs:QWORD
+EXTERNDEF gcSmiIdtr:FWORD
+EXTERNDEF gcSmiGdtr:FWORD
+EXTERNDEF gcPsd:BYTE
+EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
+
+
+ .data
+
+NullSeg DQ 0 ; reserved by architecture
+CodeSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeCodeSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeSsSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 93h
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 93h
+ DB 0cfh ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 9bh
+ DB 8fh
+ DB 0
+DataSeg16 LABEL QWORD
+ DW -1
+ DW 0
+ DB 0
+ DB 93h
+ DB 8fh
+ DB 0
+CodeSeg64 LABEL QWORD
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 9bh
+ DB 0afh ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE = $ - offset NullSeg
+
+TssSeg LABEL QWORD
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 89h
+ DB 080h ; LimitHigh
+ DB 0 ; BaseHigh
+ExceptionTssSeg LABEL QWORD
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 89h
+ DB 080h ; LimitHigh
+ DB 0 ; BaseHigh
+
+CODE_SEL = offset CodeSeg32 - offset NullSeg
+DATA_SEL = offset DataSeg32 - offset NullSeg
+TSS_SEL = offset TssSeg - offset NullSeg
+EXCEPTION_TSS_SEL = offset ExceptionTssSeg - offset NullSeg
+
+IA32_TSS STRUC
+ DW ?
+ DW ?
+ ESP0 DD ?
+ SS0 DW ?
+ DW ?
+ ESP1 DD ?
+ SS1 DW ?
+ DW ?
+ ESP2 DD ?
+ SS2 DW ?
+ DW ?
+ _CR3 DD ?
+ EIP DD ?
+ EFLAGS DD ?
+ _EAX DD ?
+ _ECX DD ?
+ _EDX DD ?
+ _EBX DD ?
+ _ESP DD ?
+ _EBP DD ?
+ _ESI DD ?
+ _EDI DD ?
+ _ES DW ?
+ DW ?
+ _CS DW ?
+ DW ?
+ _SS DW ?
+ DW ?
+ _DS DW ?
+ DW ?
+ _FS DW ?
+ DW ?
+ _GS DW ?
+ DW ?
+ LDT DW ?
+ DW ?
+ DW ?
+ DW ?
+IA32_TSS ENDS
+
+; Create 2 TSS segments just after GDT
+TssDescriptor LABEL BYTE
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD 0 ; EIP
+ DD 0 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW 0 ; ES
+ DW 0 ; Reserved
+ DW 0 ; CS
+ DW 0 ; Reserved
+ DW 0 ; SS
+ DW 0 ; Reserved
+ DW 0 ; DS
+ DW 0 ; Reserved
+ DW 0 ; FS
+ DW 0 ; Reserved
+ DW 0 ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+TSS_DESC_SIZE = $ - offset TssDescriptor
+
+ExceptionTssDescriptor LABEL BYTE
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD offset PFHandlerEntry ; EIP
+ DD 00000002 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW DATA_SEL ; ES
+ DW 0 ; Reserved
+ DW CODE_SEL ; CS
+ DW 0 ; Reserved
+ DW DATA_SEL ; SS
+ DW 0 ; Reserved
+ DW DATA_SEL ; DS
+ DW 0 ; Reserved
+ DW DATA_SEL ; FS
+ DW 0 ; Reserved
+ DW DATA_SEL ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+
+gcPsd LABEL BYTE
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 SHL 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0
+ DQ offset NullSeg
+ DD GDT_SIZE
+ DD 0
+ DB 24 dup (0)
+ DQ offset gSmiMtrrs
+PSD_SIZE = $ - offset gcPsd
+
+gcSmiGdtr LABEL FWORD
+ DW GDT_SIZE - 1
+ DD offset NullSeg
+
+gcSmiIdtr LABEL FWORD
+ DW IDT_SIZE - 1
+ DD offset _SmiIDT
+
+_SmiIDT LABEL QWORD
+REPEAT 32
+ DW 0 ; Offset 0:15
+ DW CODE_SEL ; Segment selector
+ DB 0 ; Unused
+ DB 8eh ; Interrupt Gate, Present
+ DW 0 ; Offset 16:31
+ ENDM
+IDT_SIZE = $ - offset _SmiIDT
+
+TaskGateDescriptor LABEL DWORD
+ DW 0 ; Reserved
+ DW EXCEPTION_TSS_SEL ; TSS Segment selector
+ DB 0 ; Reserved
+ DB 85h ; Task Gate, present, DPL = 0
+ DW 0 ; Reserved
+
+
+ .code
+;------------------------------------------------------------------------------
+; PageFaultIdtHandlerSmmProfile is the entry point for page fault only
+;
+;
+; Stack:
+; +---------------------+
+; + EFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + EIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + EBP +
+; +---------------------+ <-- EBP
+;
+;
+;------------------------------------------------------------------------------
+PageFaultIdtHandlerSmmProfile PROC
+ push 0eh ; Page Fault
+
+ push ebp
+ mov ebp, esp
+
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0fffffff0h
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push eax
+ push ecx
+ push edx
+ push ebx
+ lea ecx, [ebp + 6 * 4]
+ push ecx ; ESP
+ push dword ptr [ebp] ; EBP
+ push esi
+ push edi
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ mov eax, ss
+ push eax
+ movzx eax, word ptr [ebp + 4 * 4]
+ push eax
+ mov eax, ds
+ push eax
+ mov eax, es
+ push eax
+ mov eax, fs
+ push eax
+ mov eax, gs
+ push eax
+
+;; UINT32 Eip;
+ mov eax, [ebp + 3 * 4]
+ push eax
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ xor eax, eax
+ str ax
+ push eax
+ sldt ax
+ push eax
+
+;; UINT32 EFlags;
+ mov eax, [ebp + 5 * 4]
+ push eax
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 208h
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ sub esp, 512
+ mov edi, esp
+ db 0fh, 0aeh, 07h ;fxsave [edi]
+
+; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword ptr [ebp + 2 * 4]
+
+;; call into exception handler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, dword ptr [ebp + 1 * 4]
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ mov eax, SmiPFHandler
+ call eax
+ add esp, 8
+
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ db 0fh, 0aeh, 0eh ; fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoint in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov cr3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword ptr [ebp + 5 * 4]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword ptr [ebp + 3 * 4]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop gs
+ pop fs
+ pop es
+ pop ds
+ pop dword ptr [ebp + 4 * 4]
+ pop ss
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop edi
+ pop esi
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+
+ mov esp, ebp
+ pop ebp
+
+; Enable TF bit after page fault handler runs
+ bts dword ptr [esp + 16], 8 ; EFLAGS
+
+ add esp, 8 ; skip INT# & ErrCode
+Return:
+ iretd
+;
+; Page Fault Exception Handler entry when SMM Stack Guard is enabled
+; Execution starts here after a task switch
+;
+PFHandlerEntry::
+;
+; Get this processor's TSS
+;
+ sub esp, 8
+ sgdt [esp + 2]
+ mov eax, [esp + 4] ; GDT base
+ add esp, 8
+ mov ecx, [eax + TSS_SEL + 2]
+ shl ecx, 8
+ mov cl, [eax + TSS_SEL + 7]
+ ror ecx, 8 ; ecx = TSS base
+
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0fffffff0h
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push (IA32_TSS ptr [ecx])._EAX
+ push (IA32_TSS ptr [ecx])._ECX
+ push (IA32_TSS ptr [ecx])._EDX
+ push (IA32_TSS ptr [ecx])._EBX
+ push (IA32_TSS ptr [ecx])._ESP
+ push (IA32_TSS ptr [ecx])._EBP
+ push (IA32_TSS ptr [ecx])._ESI
+ push (IA32_TSS ptr [ecx])._EDI
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzx eax, (IA32_TSS ptr [ecx])._SS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._CS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._DS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._ES
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._FS
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx])._GS
+ push eax
+
+;; UINT32 Eip;
+ push (IA32_TSS ptr [ecx]).EIP
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0FFFFh
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ mov eax, TSS_SEL
+ push eax
+ movzx eax, (IA32_TSS ptr [ecx]).LDT
+ push eax
+
+;; UINT32 EFlags;
+ push (IA32_TSS ptr [ecx]).EFLAGS
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 208h
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+;; Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
+;; when executing fxsave/fxrstor instruction
+ clts
+ sub esp, 512
+ mov edi, esp
+ db 0fh, 0aeh, 07h ;fxsave [edi]
+
+; The UEFI calling convention for IA32 requires that the Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword ptr [ebp]
+
+;; call into exception handler
+ mov ebx, ecx
+ mov eax, SmiPFHandler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, 14
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ call eax
+ add esp, 8
+
+ mov ecx, ebx
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ db 0fh, 0aeh, 0eh ; fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._CR3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop (IA32_TSS ptr [ecx]).EFLAGS
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop (IA32_TSS ptr [ecx]).EIP
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop eax
+ mov (IA32_TSS ptr [ecx])._GS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._FS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._ES, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._DS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._CS, ax
+ pop eax
+ mov (IA32_TSS ptr [ecx])._SS, ax
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop (IA32_TSS ptr [ecx])._EDI
+ pop (IA32_TSS ptr [ecx])._ESI
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop (IA32_TSS ptr [ecx])._EBX
+ pop (IA32_TSS ptr [ecx])._EDX
+ pop (IA32_TSS ptr [ecx])._ECX
+ pop (IA32_TSS ptr [ecx])._EAX
+
+ mov esp, ebp
+
+; Set single-step #DB if SMM profile is enabled and a page fault exception happens
+ cmp FeaturePcdGet (PcdCpuSmmProfileEnable), 0
+ jz @Done2
+
+; Create return context for iretd in stub function
+ mov eax, (IA32_TSS ptr [ecx])._ESP ; Get old stack pointer
+ mov ebx, (IA32_TSS ptr [ecx]).EIP
+ mov [eax - 0ch], ebx ; create EIP in old stack
+ movzx ebx, (IA32_TSS ptr [ecx])._CS
+ mov [eax - 08h], ebx ; create CS in old stack
+ mov ebx, (IA32_TSS ptr [ecx]).EFLAGS
+ bts ebx, 8
+ mov [eax - 04h], ebx ; create eflags in old stack
+ mov eax, (IA32_TSS ptr [ecx])._ESP ; Get old stack pointer
+ sub eax, 0ch ; minus 12 byte
+ mov (IA32_TSS ptr [ecx])._ESP, eax ; Set new stack pointer
+; Replace the EIP of interrupted task with stub function
+ mov eax, PageFaultStubFunction
+ mov (IA32_TSS ptr [ecx]).EIP, eax
+; Jump to the iretd so that the next page fault handler task will start again after iretd.
+@Done2:
+ add esp, 4 ; skip ErrCode
+
+ jmp Return
+PageFaultIdtHandlerSmmProfile ENDP
+
+PageFaultStubFunction PROC
+;
+; We need to clear the TS bit in CR0 to execute
+; x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
+;
+ clts
+ iretd
+PageFaultStubFunction ENDP
+
+InitializeIDTSmmStackGuard PROC USES ebx
+;
+; If the SMM Stack Guard feature is enabled, the Page Fault Exception entry in the IDT
+; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+; the processor can use a known good stack in case the current stack has run out.
+;
+ lea ebx, _SmiIDT + 14 * 8
+ lea edx, TaskGateDescriptor
+ mov eax, [edx]
+ mov [ebx], eax
+ mov eax, [edx + 4]
+ mov [ebx + 4], eax
+ ret
+InitializeIDTSmmStackGuard ENDP
+
+ END
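
InitializeIDTSmmStackGuard above copies a pre-built Task Gate Descriptor into IDT entry 14 (#PF), so a page fault forces a hardware task switch onto a known-good TSS stack. The C sketch below is illustrative only and not part of this patch: it shows the 32-bit task gate layout and how such a descriptor could be installed, with InstallPageFaultTaskGate, IdtBase and TssSelector as assumed names.

  //
  // Illustrative sketch only: build a 32-bit task gate for vector 14 and
  // install it.  Assumes MdePkg Base.h types; the function and parameter
  // names are hypothetical.
  //
  #include <Base.h>

  #define IA32_TASK_GATE_TYPE  0x5      // 0101b = 32-bit task gate

  typedef union {
    struct {
      UINT32  Reserved0   : 16;
      UINT32  TssSelector : 16;         // selector of the known-good TSS
      UINT32  Reserved1   : 8;
      UINT32  Type        : 4;          // IA32_TASK_GATE_TYPE
      UINT32  S           : 1;          // 0 = system descriptor
      UINT32  Dpl         : 2;
      UINT32  Present     : 1;
      UINT32  Reserved2   : 16;
    } Bits;
    UINT64    Uint64;
  } TASK_GATE_DESCRIPTOR;

  VOID
  InstallPageFaultTaskGate (
    IN UINT64  *IdtBase,                // base of the SMM IDT
    IN UINT16  TssSelector              // e.g. the TSS_SEL used by the .asm code
    )
  {
    TASK_GATE_DESCRIPTOR  Gate;

    Gate.Uint64           = 0;
    Gate.Bits.TssSelector = TssSelector;
    Gate.Bits.Type        = IA32_TASK_GATE_TYPE;
    Gate.Bits.S           = 0;
    Gate.Bits.Dpl         = 0;
    Gate.Bits.Present     = 1;
    IdtBase[14]           = Gate.Uint64;  // vector 14 is #PF
  }
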
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.S
new file mode 100644
index 0000000000..e8db33a45a
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.S
@@ -0,0 +1,84 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php.
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+# Module Name:
+#
+# SmmInit.S
+#
+# Abstract:
+#
+# Functions for relocating SMBASEs for all processors
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(gSmmCr0)
+ASM_GLOBAL ASM_PFX(gSmmCr3)
+ASM_GLOBAL ASM_PFX(gSmmCr4)
+ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)
+ASM_GLOBAL ASM_PFX(gcSmmInitSize)
+ASM_GLOBAL ASM_PFX(gSmmJmpAddr)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_GLOBAL ASM_PFX(gSmmInitStack)
+ASM_GLOBAL ASM_PFX(gcSmiInitGdtr)
+
+.equ PROTECT_MODE_CS, 0x08
+.equ PROTECT_MODE_DS, 0x20
+
+ .text
+
+ASM_PFX(gcSmiInitGdtr):
+ .word 0
+ .quad 0
+
+SmmStartup:
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr3): .space 4
+ movl %eax, %cr3
+ .byte 0x67,0x66
+ lgdt %cs:(ASM_PFX(gcSmiInitGdtr) - SmmStartup)(%ebp)
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr4): .space 4
+ movl %eax, %cr4
+ .byte 0x66,0xb8
+ASM_PFX(gSmmCr0): .space 4
+ .byte 0xbf, PROTECT_MODE_DS, 0 # mov di, PROTECT_MODE_DS
+ movl %eax, %cr0
+ .byte 0x66,0xea # jmp far [ptr48]
+ASM_PFX(gSmmJmpAddr): .long Start32bit
+ .word PROTECT_MODE_CS
+Start32bit:
+ movl %edi,%ds
+ movl %edi,%es
+ movl %edi,%fs
+ movl %edi,%gs
+ movl %edi,%ss
+ .byte 0xbc # mov esp, imm32
+ASM_PFX(gSmmInitStack): .space 4
+ call ASM_PFX(SmmInitHandler)
+ rsm
+
+ASM_PFX(gcSmmInitTemplate):
+
+_SmmInitTemplate:
+ .byte 0x66
+ movl $SmmStartup, %ebp
+ .byte 0x66, 0x81, 0xed, 0, 0, 3, 0 # sub ebp, 0x30000
+ jmp *%bp # jmp ebp actually
+
+ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate)
+
+
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ pushl %eax
+ movl ASM_PFX(mRebasedFlag), %eax
+ movb $1, (%eax)
+ popl %eax
+ jmp *ASM_PFX(mSmmRelocationOriginalAddress)
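
SmmStartup above is the 32-bit SMI entry used during SMBASE relocation, and gcSmmInitTemplate is the 16-bit stub that reaches it from the default SMBASE (the "sub ebp, 0x30000" suggests a default SMBASE of 0x30000). The C sketch below shows how such a template is typically copied to the default SMI entry point before the relocation SMI is sent; the macro values and the function name are assumptions for illustration, not taken from this patch.

  //
  // Illustrative sketch only: place the startup template at the default SMI
  // entry point before triggering the relocation SMI.
  //
  #include <Base.h>
  #include <Library/BaseMemoryLib.h>

  #define SMM_DEFAULT_SMBASE  0x30000   // assumed default SMBASE
  #define SMM_HANDLER_OFFSET  0x8000    // assumed SMI entry offset from SMBASE

  extern CONST UINT8   gcSmmInitTemplate[];
  extern CONST UINT16  gcSmmInitSize;

  VOID
  InstallSmiInitTemplate (
    VOID
    )
  {
    //
    // Copy the 16-bit stub to 0x38000 so the first SMI on every processor
    // jumps into SmmStartup, which calls SmmInitHandler and then RSMs.
    //
    CopyMem (
      (VOID *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET),
      (CONST VOID *)gcSmmInitTemplate,
      (UINTN)gcSmmInitSize
      );
  }
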
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.asm
new file mode 100644
index 0000000000..9ba2aebe69
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.asm
@@ -0,0 +1,94 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+; Module Name:
+;
+; SmmInit.Asm
+;
+; Abstract:
+;
+; Functions for relocating SMBASEs for all processors
+;
+;-------------------------------------------------------------------------------
+
+ .686p
+ .xmm
+ .model flat,C
+
+SmmInitHandler PROTO C
+
+EXTERNDEF C gSmmCr0:DWORD
+EXTERNDEF C gSmmCr3:DWORD
+EXTERNDEF C gSmmCr4:DWORD
+EXTERNDEF C gcSmmInitTemplate:BYTE
+EXTERNDEF C gcSmmInitSize:WORD
+EXTERNDEF C gSmmJmpAddr:QWORD
+EXTERNDEF C mRebasedFlag:PTR BYTE
+EXTERNDEF C mSmmRelocationOriginalAddress:DWORD
+EXTERNDEF C gSmmInitStack:DWORD
+EXTERNDEF C gcSmiInitGdtr:FWORD
+
+PROTECT_MODE_CS EQU 08h
+PROTECT_MODE_DS EQU 20h
+
+ .code
+
+gcSmiInitGdtr LABEL FWORD
+ DW 0
+ DQ 0
+
+SmmStartup PROC
+ DB 66h, 0b8h
+gSmmCr3 DD ?
+ mov cr3, eax
+ DB 67h, 66h
+ lgdt fword ptr cs:[ebp + (offset gcSmiInitGdtr - SmmStartup)]
+ DB 66h, 0b8h
+gSmmCr4 DD ?
+ mov cr4, eax
+ DB 66h, 0b8h
+gSmmCr0 DD ?
+ DB 0bfh, PROTECT_MODE_DS, 0 ; mov di, PROTECT_MODE_DS
+ mov cr0, eax
+ DB 66h, 0eah ; jmp far [ptr48]
+gSmmJmpAddr LABEL QWORD
+ DD @32bit
+ DW PROTECT_MODE_CS
+@32bit:
+ mov ds, edi
+ mov es, edi
+ mov fs, edi
+ mov gs, edi
+ mov ss, edi
+ DB 0bch ; mov esp, imm32
+gSmmInitStack DD ?
+ call SmmInitHandler
+ rsm
+SmmStartup ENDP
+
+gcSmmInitTemplate LABEL BYTE
+
+_SmmInitTemplate PROC
+ DB 66h
+ mov ebp, SmmStartup
+ DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h
+ jmp bp ; jmp ebp actually
+_SmmInitTemplate ENDP
+
+gcSmmInitSize DW $ - gcSmmInitTemplate
+
+SmmRelocationSemaphoreComplete PROC
+ push eax
+ mov eax, mRebasedFlag
+ mov byte ptr [eax], 1
+ pop eax
+ jmp [mSmmRelocationOriginalAddress]
+SmmRelocationSemaphoreComplete ENDP
+ END
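
SmmRelocationSemaphoreComplete writes 1 through the mRebasedFlag pointer and then jumps back to the original SMI handler address, so the C side can poll that flag to learn when a processor's SMBASE relocation has finished. A minimal sketch of such a wait loop follows, assuming the flag is a BOOLEAN reached through mRebasedFlag; WaitForSmBaseRelocation is a hypothetical name.

  //
  // Illustrative sketch only: spin until the SMI handler has executed
  // SmmRelocationSemaphoreComplete and set the rebased flag.
  //
  #include <Base.h>
  #include <Library/BaseLib.h>

  extern volatile BOOLEAN  *mRebasedFlag;   // points at the per-CPU rebased flag

  VOID
  WaitForSmBaseRelocation (
    VOID
    )
  {
    while (!*mRebasedFlag) {
      CpuPause ();      // hint to the CPU that this is a busy-wait loop
    }
  }
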
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
new file mode 100644
index 0000000000..85756d0710
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -0,0 +1,80 @@
+/** @file
+IA-32 processor specific functions to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0);
+
+ return ;
+}
+
+/**
+ Allocate pages for creating 4KB pages based on 2MB pages when a page fault happens.
+ 32-bit firmware does not need it.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+}
+
+/**
+ Update the page table to map the memory correctly so that the instruction which
+ caused the page fault can execute successfully. It also saves the original page
+ table so that it can be restored in the single-step exception handler. 32-bit
+ firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+ @param IsValidPFAddress The flag that indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+}
+
+/**
+ Clear TF in EFLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ SystemContext.SystemContextIa32->Eflags &= (UINTN) ~BIT8;
+}
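
ClearTrapFlag above clears BIT8 (TF) in the saved EFLAGS so that single stepping stops once the debug trap has been taken. The sketch below shows how a #DB (vector 1) handler might call it after the single-stepped instruction has retired; DebugExceptionHandler is a hypothetical name and not part of this patch.

  //
  // Illustrative sketch only: a debug-exception handler that turns single
  // stepping off again.  Assumes ClearTrapFlag() is declared in scope.
  //
  #include <Protocol/DebugSupport.h>

  VOID
  EFIAPI
  DebugExceptionHandler (
    IN EFI_EXCEPTION_TYPE  InterruptType,
    IN EFI_SYSTEM_CONTEXT  SystemContext
    )
  {
    //
    // Clear TF in the saved EFLAGS so the interrupted code resumes normally
    // once the handler returns.
    //
    ClearTrapFlag (SystemContext);
  }
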
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
new file mode 100644
index 0000000000..3e15bffc60
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
@@ -0,0 +1,97 @@
+/** @file
+IA-32 processor specific header file to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT32 BTSBufferBase;
+ UINT32 BTSIndex;
+ UINT32 BTSAbsoluteMaximum;
+ UINT32 BTSInterruptThreshold;
+ UINT32 PEBSBufferBase;
+ UINT32 PEBSIndex;
+ UINT32 PEBSAbsoluteMaximum;
+ UINT32 PEBSInterruptThreshold;
+ UINT32 PEBSCounterReset[4];
+ UINT32 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT32 LastBranchFrom;
+ UINT32 LastBranchTo;
+ UINT32 Rsvd0 : 4;
+ UINT32 BranchPredicted : 1;
+ UINT32 Rsvd1 : 27;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT32 Eflags;
+ UINT32 LinearIP;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT32 Esi;
+ UINT32 Edi;
+ UINT32 Ebp;
+ UINT32 Esp;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+#define PHYSICAL_ADDRESS_MASK ((1ull << 32) - SIZE_4KB)
+
+/**
+ Update the page table to map the memory correctly so that the instruction which
+ caused the page fault can execute successfully. It also saves the original page
+ table so that it can be restored in the single-step exception handler. 32-bit
+ firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+ @param IsValidPFAddress The flag that indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+ Allocate pages for creating 4KB pages based on 2MB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_
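
PHYSICAL_ADDRESS_MASK evaluates to 0xFFFFF000 for IA-32, i.e. bits [31:12] of a paging entry. The small sketch below shows the kind of masking it is meant for; GetPageFrameAddress is a hypothetical helper, not declared by this header.

  //
  // Illustrative sketch only: strip the attribute bits from a paging entry to
  // recover the 4KB-aligned physical frame address.
  //
  UINT32
  GetPageFrameAddress (
    IN UINT64  PageTableEntry
    )
  {
    // (1ull << 32) - SIZE_4KB == 0xFFFFF000, keeping bits [31:12].
    return (UINT32)(PageTableEntry & PHYSICAL_ADDRESS_MASK);
  }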