path: root/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32
author     Jiewen Yao <jiewen.yao@intel.com>    2016-10-23 23:19:52 +0800
committer  Jiewen Yao <jiewen.yao@intel.com>    2016-11-17 16:30:07 +0800
commit     717fb60443fbaedfab9a37fd186361b3b9e1ecfe (patch)
tree       f360f028594e994f452f6e674726bf210643498b /UefiCpuPkg/PiSmmCpuDxeSmm/Ia32
parent     28b020b5de1e1bee4a44e2536f71fc96c781863c (diff)
UefiCpuPkg/PiSmmCpuDxeSmm: Add paging protection.
PiSmmCpuDxeSmm consumes SmmAttributesTable and sets up the page table as follows:
1) The code region is marked read-only and the data region non-executable, if the PE image is 4KB aligned.
2) Important data structures, such as the GDT and IDT, are set to read-only.
3) SmmSaveState is set to non-executable, and SmmEntrypoint is set to read-only.
4) If static paging is supported, the page table itself is read-only.

The page table is used to protect the other components as well as itself. If dynamic paging is used, only *partial* protection can be provided, and we rely on other components not modifying the page table.

The XD enabling code is moved to SmiEntry so that NX takes effect.

Cc: Jeff Fan <jeff.fan@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jiewen Yao <jiewen.yao@intel.com>
Tested-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Michael D Kinney <michael.d.kinney@intel.com>
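The following is a minimal sketch (not part of the patch) of what items 1) and 2) above mean in practice for a 4KB-aligned image: the code region stays executable but becomes read-only, while the data region stays writable but becomes non-executable. It reuses the EFI_MEMORY_RO/EFI_MEMORY_XP attribute bits referenced by this patch and assumes a SmmSetMemoryAttributes() helper shaped like the SmmSetMemoryAttributesEx() call visible in the PageTbl.c hunk below, without the split-tracking output parameter; the base/size parameters are hypothetical.

//
// Sketch only: protect one 4KB-aligned image. CodeBase/CodeSize/DataBase/
// DataSize are hypothetical inputs, and SmmSetMemoryAttributes() is assumed
// to mirror SmmSetMemoryAttributesEx() minus the IsSplitted parameter.
//
EFI_STATUS
ProtectLoadedImageSketch (
  IN EFI_PHYSICAL_ADDRESS  CodeBase,
  IN UINT64                CodeSize,
  IN EFI_PHYSICAL_ADDRESS  DataBase,
  IN UINT64                DataSize
  )
{
  EFI_STATUS  Status;

  //
  // Code region: executable, but read-only.
  //
  Status = SmmSetMemoryAttributes (CodeBase, CodeSize, EFI_MEMORY_RO);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Data region: writable, but non-executable.
  //
  return SmmSetMemoryAttributes (DataBase, DataSize, EFI_MEMORY_XP);
}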
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/Ia32')
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c            71
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S           75
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm         75
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm        79
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S      226
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm     36
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm    36
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c       35
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c      4
9 files changed, 298 insertions, 339 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
index a871bef4e2..65f09e5622 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -58,7 +58,7 @@ SmmInitPageTable (
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
InitializeIDTSmmStackGuard ();
}
- return Gen4GPageTable (0, TRUE);
+ return Gen4GPageTable (TRUE);
}
/**
@@ -99,7 +99,7 @@ SmiPFHandler (
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= mCpuHotPlugData.SmrrBase) &&
(PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
- DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
CpuDeadLoop ();
}
@@ -109,7 +109,7 @@ SmiPFHandler (
if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
(PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
- DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
);
@@ -128,3 +128,68 @@ SmiPFHandler (
ReleaseSpinLock (mPFLock);
}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+  // Disable write protection, because we need to mark the page-table pages as read-only.
+  // We must be able to *write* the page-table memory in order to mark it *read-only*.
+ //
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+
+ L3PageTable = (UINT64 *)GetPageTableBase ();
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < 4; Index3++) {
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+
+ return ;
+}
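SetPageTableAttributes() above relies on a simple write-protect guard: CR0.WP is cleared so the page-table pages can still be written after they have been marked read-only, and set again once the attributes are in place. The do/while loop repeats because marking a 4KB range read-only may split a 2MB mapping and create a new L1 table, which then also needs protecting; it exits only once a full pass causes no further splits. A stand-alone sketch of the guard, using the same AsmReadCr0()/AsmWriteCr0() BaseLib calls and CR0_WP mask as the hunk (the Entry/NewValue parameters are hypothetical):

//
// Sketch only: the CR0.WP guard used by SetPageTableAttributes() above.
//
STATIC
VOID
WriteReadOnlyPageTableEntry (
  IN OUT UINT64  *Entry,       // hypothetical pointer into a read-only page-table page
  IN     UINT64  NewValue
  )
{
  //
  // Clear CR0.WP so supervisor writes ignore the read-only marking.
  //
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  *Entry = NewValue;

  //
  // Restore CR0.WP so the page tables are protected again.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}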
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
index ec5b9a0b04..0c075582c3 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,9 +24,13 @@ ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
.equ DSC_OFFSET, 0xfb00
.equ DSC_GDTPTR, 0x30
.equ DSC_GDTSIZ, 0x38
@@ -122,8 +126,41 @@ L11:
orl $BIT10, %eax
L12: # as cr4.PGE is not set here, refresh cr3
movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L5
+# Load TSS
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltrw %ax
+L5:
+
+# enable NXE if supported
+ .byte 0xb0 # mov al, imm8
+ASM_PFX(mXdSupported): .byte 1
+ cmpb $0, %al
+ jz SkipNxe
+#
+# Check XD disable bit
+#
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+SkipNxe:
+ subl $4, %esp
+NxeDone:
+
movl %cr0, %ebx
- orl $0x080010000, %ebx # enable paging + WP
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movl %ebx, %cr0
leal DSC_OFFSET(%edi),%ebx
movw DSC_DS(%ebx),%ax
@@ -135,35 +172,39 @@ L12: # as cr4.PGE is not set here, refresh
movw DSC_SS(%ebx),%ax
movl %eax, %ss
- cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
- jz L5
-
-# Load TSS
- movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
- movl $TSS_SEGMENT, %eax
- ltrw %ax
-L5:
-
# jmp _SmiHandler # instruction is not needed
_SmiHandler:
- movl (%esp), %ebx
+ movl 4(%esp), %ebx
pushl %ebx
movl $ASM_PFX(CpuSmmDebugEntry), %eax
call *%eax
- popl %ecx
-
+ addl $4, %esp
+
pushl %ebx
movl $ASM_PFX(SmiRendezvous), %eax
call *%eax
- popl %ecx
+ addl $4, %esp
pushl %ebx
movl $ASM_PFX(CpuSmmDebugExit), %eax
call *%eax
- popl %ecx
-
+ addl $4, %esp
+
+ movl $ASM_PFX(mXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz L16
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+L16:
rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
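A C-level restatement of the new NXE handling above (sketch only; the real logic has to stay in assembly because it runs on the SMI entry path before any C environment exists): if mXdSupported is non-zero, the handler reads MSR_IA32_MISC_ENABLE (0x1A0), clears the XD Disable bit (bit 34) if set while remembering that fact, and sets EFER.NXE (bit 11 of MSR 0xC0000080); before RSM, XD Disable is put back if it was set on entry. This is also why _SmiHandler now reads the CPU index from 4(%esp) and uses "addl $4, %esp" instead of "popl %ecx": one extra dword (the saved high half of MSR_IA32_MISC_ENABLE, or the dummy slot from "subl $4, %esp") sits on the stack above the CPU index. The TSS load was likewise moved above the point where CR0.PG/CR0.WP are set, presumably so the busy-flag write into the GDT still works once the GDT pages are read-only.

//
// Sketch only -- a C restatement of the assembly above, using BaseLib MSR
// intrinsics. mXdSupported is the byte defined in the SMI entry code; the
// MSR numbers and bit positions come from the hunk.
//
#define SKETCH_MSR_IA32_MISC_ENABLE  0x1A0
#define SKETCH_MSR_EFER              0xC0000080
#define SKETCH_MSR_EFER_XD           0x800

BOOLEAN
EnableNxeOnSmiEntrySketch (
  VOID
  )
{
  UINT64   MiscEnable;
  BOOLEAN  XdDisableWasSet;

  if (!mXdSupported) {
    return FALSE;
  }

  MiscEnable      = AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE);
  XdDisableWasSet = (BOOLEAN)((MiscEnable & LShiftU64 (BIT2, 32)) != 0);
  if (XdDisableWasSet) {
    //
    // Clear MSR_IA32_MISC_ENABLE[34] (XD Disable) so EFER.NXE can take effect.
    //
    AsmWriteMsr64 (SKETCH_MSR_IA32_MISC_ENABLE, MiscEnable & ~LShiftU64 (BIT2, 32));
  }
  AsmWriteMsr64 (SKETCH_MSR_EFER, AsmReadMsr64 (SKETCH_MSR_EFER) | SKETCH_MSR_EFER_XD);
  return XdDisableWasSet;
}

VOID
RestoreXdOnSmiExitSketch (
  IN BOOLEAN  XdDisableWasSet
  )
{
  if (mXdSupported && XdDisableWasSet) {
    //
    // Put XD Disable back the way it was before the SMI.
    //
    AsmWriteMsr64 (
      SKETCH_MSR_IA32_MISC_ENABLE,
      AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE) | LShiftU64 (BIT2, 32)
      );
  }
}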
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
index ac1a9b48dd..eda1708e3f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -22,6 +22,10 @@
.model flat,C
.xmm
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
DSC_OFFSET EQU 0fb00h
DSC_GDTPTR EQU 30h
DSC_GDTSIZ EQU 38h
@@ -43,6 +47,7 @@ EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD
+EXTERNDEF mXdSupported:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD
@@ -128,8 +133,42 @@ gSmiCr3 DD ?
or eax, BIT10
@@: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+; Load TSS
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+@@:
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+mXdSupported DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
mov ebx, cr0
- or ebx, 080010000h ; enable paging + WP
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, ebx
lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS]
@@ -141,34 +180,38 @@ gSmiCr3 DD ?
mov ax, [ebx + DSC_SS]
mov ss, eax
- cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
- jz @F
-
-; Load TSS
- mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
- mov eax, TSS_SEGMENT
- ltr ax
-@@:
; jmp _SmiHandler ; instruction is not needed
_SmiHandler PROC
- mov ebx, [esp] ; CPU Index
-
+ mov ebx, [esp + 4] ; CPU Index
push ebx
mov eax, CpuSmmDebugEntry
call eax
- pop ecx
+ add esp, 4
push ebx
mov eax, SmiRendezvous
call eax
- pop ecx
-
+ add esp, 4
+
push ebx
mov eax, CpuSmmDebugExit
call eax
- pop ecx
+ add esp, 4
+ mov eax, mXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @f
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+@@:
rsm
_SmiHandler ENDP
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
index 4fb0c13508..d50a3170aa 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
@@ -18,6 +18,10 @@
;
;-------------------------------------------------------------------------------
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
%define DSC_OFFSET 0xfb00
%define DSC_GDTPTR 0x30
%define DSC_GDTSIZ 0x38
@@ -40,6 +44,7 @@ global ASM_PFX(gcSmiHandlerSize)
global ASM_PFX(gSmiCr3)
global ASM_PFX(gSmiStack)
global ASM_PFX(gSmbase)
+global ASM_PFX(mXdSupported)
extern ASM_PFX(gSmiHandlerIdtr)
SECTION .text
@@ -56,7 +61,7 @@ _SmiEntryPoint:
mov ebp, eax ; ebp = GDT base
o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
mov ax, PROTECT_MODE_CS
- mov [cs:bx-0x2],ax
+ mov [cs:bx-0x2],ax
DB 0x66, 0xbf ; mov edi, SMBASE
ASM_PFX(gSmbase): DD 0
lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
@@ -66,7 +71,7 @@ ASM_PFX(gSmbase): DD 0
or ebx, 0x23
mov cr0, ebx
jmp dword 0x0:0x0
-_GdtDesc:
+_GdtDesc:
DW 0
DD 0
@@ -115,8 +120,42 @@ ASM_PFX(gSmiCr3): DD 0
or eax, BIT10
.4: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
+ jz .6
+; Load TSS
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+.6:
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+ASM_PFX(mXdSupported): DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .5
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.5:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
mov ebx, cr0
- or ebx, 0x080010000 ; enable paging + WP
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
mov cr0, ebx
lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS]
@@ -128,35 +167,39 @@ ASM_PFX(gSmiCr3): DD 0
mov ax, [ebx + DSC_SS]
mov ss, eax
- cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
- jz .5
-
-; Load TSS
- mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
- mov eax, TSS_SEGMENT
- ltr ax
-.5:
; jmp _SmiHandler ; instruction is not needed
global ASM_PFX(SmiHandler)
ASM_PFX(SmiHandler):
- mov ebx, [esp] ; CPU Index
-
+ mov ebx, [esp + 4] ; CPU Index
push ebx
mov eax, ASM_PFX(CpuSmmDebugEntry)
call eax
- pop ecx
+ add esp, 4
push ebx
mov eax, ASM_PFX(SmiRendezvous)
call eax
- pop ecx
-
+ add esp, 4
+
push ebx
mov eax, ASM_PFX(CpuSmmDebugExit)
call eax
- pop ecx
-
+ add esp, 4
+
+ mov eax, ASM_PFX(mXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .7
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .7
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.7:
rsm
ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
index 4130bf5be5..cf5ef8217f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,6 +24,7 @@ ASM_GLOBAL ASM_PFX(PageFaultStubFunction)
ASM_GLOBAL ASM_PFX(gSmiMtrrs)
ASM_GLOBAL ASM_PFX(gcSmiIdtr)
ASM_GLOBAL ASM_PFX(gcSmiGdtr)
+ASM_GLOBAL ASM_PFX(gTaskGateDescriptor)
ASM_GLOBAL ASM_PFX(gcPsd)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
@@ -236,207 +237,10 @@ ASM_PFX(gcPsd):
ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1
.long NullSeg
-ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1
- .long _SmiIDT
-
-_SmiIDT:
-# The following segment repeats 32 times:
-# No. 1
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 2
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 3
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 4
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 5
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 6
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 7
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 8
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 9
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 10
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 11
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 12
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 13
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 14
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 15
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 16
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 17
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 18
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 19
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 20
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 21
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 22
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 23
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 24
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 25
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 26
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 27
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 28
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 29
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 30
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 31
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 32
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-
-.equ IDT_SIZE, . - _SmiIDT
-
-TaskGateDescriptor:
+ASM_PFX(gcSmiIdtr): .word 0
+ .long 0
+
+ASM_PFX(gTaskGateDescriptor):
.word 0 # Reserved
.word EXCEPTION_TSS_SEL # TSS Segment selector
.byte 0 # Reserved
@@ -891,21 +695,3 @@ ASM_PFX(PageFaultStubFunction):
#
clts
iret
-
-ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
- pushl %ebx
-#
-# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
-# the processors can use a known good stack in case stack ran out.
-#
- leal _SmiIDT + 14 * 8, %ebx
- leal TaskGateDescriptor, %edx
- movl (%edx), %eax
- movl %eax, (%ebx)
- movl 4(%edx), %eax
- movl %eax, 4(%ebx)
-
- popl %ebx
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
index b4eb492da0..7b162f868b 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -26,6 +26,7 @@ EXTERNDEF PageFaultStubFunction:PROC
EXTERNDEF gSmiMtrrs:QWORD
EXTERNDEF gcSmiIdtr:FWORD
EXTERNDEF gcSmiGdtr:FWORD
+EXTERNDEF gTaskGateDescriptor:QWORD
EXTERNDEF gcPsd:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
@@ -252,20 +253,10 @@ gcSmiGdtr LABEL FWORD
DD offset NullSeg
gcSmiIdtr LABEL FWORD
- DW IDT_SIZE - 1
- DD offset _SmiIDT
-
-_SmiIDT LABEL QWORD
-REPEAT 32
- DW 0 ; Offset 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 8eh ; Interrupt Gate, Present
- DW 0 ; Offset 16:31
- ENDM
-IDT_SIZE = $ - offset _SmiIDT
-
-TaskGateDescriptor LABEL DWORD
+ DW 0
+ DD 0
+
+gTaskGateDescriptor LABEL QWORD
DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved
@@ -720,19 +711,4 @@ PageFaultStubFunction PROC
iretd
PageFaultStubFunction ENDP
-InitializeIDTSmmStackGuard PROC USES ebx
-;
-; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
-; the processors can use a known good stack in case stack is ran out.
-;
- lea ebx, _SmiIDT + 14 * 8
- lea edx, TaskGateDescriptor
- mov eax, [edx]
- mov [ebx], eax
- mov eax, [edx + 4]
- mov [ebx + 4], eax
- ret
-InitializeIDTSmmStackGuard ENDP
-
END
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
index 6a328289a8..4d58999e90 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -24,6 +24,7 @@ extern ASM_PFX(SmiPFHandler)
global ASM_PFX(gcSmiIdtr)
global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gTaskGateDescriptor)
global ASM_PFX(gcPsd)
SECTION .data
@@ -250,21 +251,10 @@ ASM_PFX(gcSmiGdtr):
DD NullSeg
ASM_PFX(gcSmiIdtr):
- DW IDT_SIZE - 1
- DD _SmiIDT
+ DW 0
+ DD 0
-_SmiIDT:
-%rep 32
- DW 0 ; Offset 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 0x8e ; Interrupt Gate, Present
- DW 0 ; Offset 16:31
-%endrep
-
-IDT_SIZE equ $ - _SmiIDT
-
-TaskGateDescriptor:
+ASM_PFX(gTaskGateDescriptor):
DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved
@@ -717,19 +707,3 @@ ASM_PFX(PageFaultStubFunction):
clts
iretd
-global ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
- push ebx
-;
-; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-; is a Task Gate Descriptor so that when a Page Fault Exception occurrs,
-; the processors can use a known good stack in case stack is ran out.
-;
- lea ebx, [_SmiIDT + 14 * 8]
- lea edx, [TaskGateDescriptor]
- mov eax, [edx]
- mov [ebx], eax
- mov eax, [edx + 4]
- mov [ebx + 4], eax
- pop ebx
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
index 9760373b47..4281698313 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -14,6 +14,33 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h"
+extern UINT64 gTaskGateDescriptor;
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+ // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+ // the processors can use a known good stack in case stack is ran out.
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Uint64 = gTaskGateDescriptor;
+}
+
/**
Initialize Gdt for all processors.
@@ -49,8 +76,10 @@ InitGdt (
gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
@@ -82,8 +111,10 @@ InitGdt (
// Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
//
GdtTssTableSize = gcSmiGdtr.Limit + 1;
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
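With InitGdt() now recording the allocation in mGdtBuffer/mGdtBufferSize and drawing it from AllocateCodePages(), the GDT/TSS area can later be covered by the same attribute machinery as the rest of SMRAM, matching item 2) of the commit message ("Important data structures, such as the GDT and IDT, are set to read-only"). A hedged sketch of what that later step could look like (assumption: the actual call site is outside the Ia32 directory shown in this diff, and a SmmSetMemoryAttributes() helper without the IsSplitted parameter is available):

//
// Sketch only: mark the recorded GDT/TSS buffer read-only. ALIGN_VALUE is
// the Base.h macro; the call site and helper name are assumptions.
//
VOID
ProtectGdtBufferSketch (
  VOID
  )
{
  SmmSetMemoryAttributes (
    mGdtBuffer,
    ALIGN_VALUE (mGdtBufferSize, SIZE_4KB),
    EFI_MEMORY_RO
    );
}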
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
index 767cb6908b..724cd92c9c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
IA-32 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -24,7 +24,7 @@ InitSmmS3Cr3 (
VOID
)
{
- mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0, TRUE);
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (TRUE);
return ;
}