Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/X64')
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm           40
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c          39
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm     120
-rw-r--r--   UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c     58
4 files changed, 252 insertions, 5 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
new file mode 100644
index 0000000000..61336c9096
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
@@ -0,0 +1,40 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+; This program and the accompanying materials
+; are licensed and made available under the terms and conditions of the BSD License
+; which accompanies this distribution. The full text of the license may be found at
+; http://opensource.org/licenses/bsd-license.php.
+;
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+;
+;-------------------------------------------------------------------------------
+
+%include "Nasm.inc"
+
+DEFAULT REL
+SECTION .text
+
+global ASM_PFX(DisableCet)
+ASM_PFX(DisableCet):
+
+ ; Skip the pushed data for call
+ mov rax, 1
+ INCSSP_RAX
+
+ mov rax, cr4
+ btr eax, 23 ; clear CET
+ mov cr4, rax
+ ret
+
+global ASM_PFX(EnableCet)
+ASM_PFX(EnableCet):
+
+ mov rax, cr4
+ bts eax, 23 ; set CET
+ mov cr4, rax
+
+ ; use jmp to skip the check for ret
+ pop rax
+ jmp rax
+
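For orientation only, not part of the patch: the C-side view of the two helpers Cet.nasm introduces. The prototypes below simply mirror the declarations added to PageTbl.c in the next file; the comments restate the return-path quirk. DisableCet() is called while CET is still on, so INCSSP must pop the shadow-stack return entry before CR4.CET is cleared; EnableCet() is called while CET is off, so after setting CR4.CET a plain RET would fail the shadow-stack check, and the routine returns with pop rax / jmp rax instead.

//
// Illustrative prototypes only; the real declarations are added to
// PageTbl.c by the next hunk of this patch.
//
VOID
EFIAPI
DisableCet (         // pops the shadow-stack return entry, then clears CR4.CET (bit 23)
  VOID
  );

VOID
EFIAPI
EnableCet (          // sets CR4.CET (bit 23); returns via pop rax / jmp rax, not RET
  VOID
  );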
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 117502dafa..2c77cb47a4 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -1,7 +1,7 @@
/** @file
Page Fault (#PF) handler for X64 processors
-Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
This program and the accompanying materials
@@ -24,6 +24,24 @@ BOOLEAN m1GPageTableSupport = FALSE;
BOOLEAN mCpuSmmStaticPageTable;
/**
+ Disable CET.
+**/
+VOID
+EFIAPI
+DisableCet (
+ VOID
+ );
+
+/**
+ Enable CET.
+**/
+VOID
+EFIAPI
+EnableCet (
+ VOID
+ );
+
+/**
Check if 1-GByte pages is supported by processor or not.
@retval TRUE 1-GByte pages is supported.
@@ -821,6 +839,7 @@ SmiPFHandler (
DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
CpuDeadLoop ();
+ goto Exit;
}
//
@@ -855,6 +874,7 @@ SmiPFHandler (
}
}
CpuDeadLoop ();
+ goto Exit;
}
//
@@ -869,6 +889,7 @@ SmiPFHandler (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
);
CpuDeadLoop ();
+ goto Exit;
}
//
@@ -888,6 +909,7 @@ SmiPFHandler (
}
CpuDeadLoop ();
+ goto Exit;
}
if (mCpuSmmStaticPageTable && IsSmmCommBufferForbiddenAddress (PFAddress)) {
@@ -897,6 +919,7 @@ SmiPFHandler (
DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
);
CpuDeadLoop ();
+ goto Exit;
}
}
@@ -930,6 +953,7 @@ SetPageTableAttributes (
UINT64 *L4PageTable;
BOOLEAN IsSplitted;
BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
//
// Don't do this if
@@ -961,6 +985,13 @@ SetPageTableAttributes (
// Disable write protection, because we need mark page table to be write protected.
// We need *write* page table memory, to mark itself to be *read only*.
//
+ CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ if (CetEnabled) {
+ //
+ // CET must be disabled if WP is disabled.
+ //
+ DisableCet();
+ }
AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
do {
@@ -1013,6 +1044,12 @@ SetPageTableAttributes (
// Enable write protection, after page table updated.
//
AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ if (CetEnabled) {
+ //
+ // re-enable CET.
+ //
+ EnableCet();
+ }
return ;
}
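A minimal sketch of the bracketing pattern SetPageTableAttributes() now follows, factored into two hypothetical helpers (DisablePageTableWriteProtect and EnablePageTableWriteProtect are illustration names, not part of this patch). The ordering matters: clearing CR0.WP while CR4.CET is set raises #GP, so CET is switched off before write protection is dropped and switched back on only after CR0.WP is restored. CR0_WP and CR4_CET_ENABLE are the driver's existing definitions used in the hunk above.

//
// Hypothetical helper names for illustration; the patch inlines this
// logic directly in SetPageTableAttributes().
//
STATIC BOOLEAN  mCetWasEnabled;

VOID
DisablePageTableWriteProtect (
  VOID
  )
{
  //
  // Clearing CR0.WP while CR4.CET is set causes #GP, so drop CET first.
  //
  mCetWasEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0);
  if (mCetWasEnabled) {
    DisableCet ();
  }
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
}

VOID
EnablePageTableWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
  if (mCetWasEnabled) {
    EnableCet ();
  }
}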
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
index 807b309b27..73febcea63 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -19,11 +19,26 @@
;-------------------------------------------------------------------------------
%include "StuffRsbNasm.inc"
+%include "Nasm.inc"
;
; Variables referrenced by C code
;
+%define MSR_IA32_S_CET 0x6A2
+%define MSR_IA32_CET_SH_STK_EN 0x1
+%define MSR_IA32_CET_WR_SHSTK_EN 0x2
+%define MSR_IA32_CET_ENDBR_EN 0x4
+%define MSR_IA32_CET_LEG_IW_EN 0x8
+%define MSR_IA32_CET_NO_TRACK_EN 0x10
+%define MSR_IA32_CET_SUPPRESS_DIS 0x20
+%define MSR_IA32_CET_SUPPRESS 0x400
+%define MSR_IA32_CET_TRACKER 0x800
+%define MSR_IA32_PL0_SSP 0x6A4
+%define MSR_IA32_INTERRUPT_SSP_TABLE_ADDR 0x6A8
+
+%define CR4_CET 0x800000
+
%define MSR_IA32_MISC_ENABLE 0x1A0
%define MSR_EFER 0xc0000080
%define MSR_EFER_XD 0x800
@@ -63,6 +78,12 @@ global ASM_PFX(gPatchSmiCr3)
global ASM_PFX(gcSmiHandlerTemplate)
global ASM_PFX(gcSmiHandlerSize)
+extern ASM_PFX(mCetSupported)
+global ASM_PFX(mPatchCetSupported)
+global ASM_PFX(mPatchCetPl0Ssp)
+global ASM_PFX(mPatchCetInterruptSsp)
+global ASM_PFX(mPatchCetInterruptSspTable)
+
DEFAULT REL
SECTION .text
@@ -174,8 +195,71 @@ SmiHandlerIdtrAbsAddr:
mov ax, [rbx + DSC_SS]
mov ss, eax
-_SmiHandler:
- mov rbx, [rsp + 0x8] ; rcx <- CpuIndex
+ mov rbx, [rsp + 0x8] ; rbx <- CpuIndex
+
+; enable CET if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(mPatchCetSupported):
+ cmp al, 0
+ jz CetDone
+
+ mov ecx, MSR_IA32_S_CET
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_PL0_SSP
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_S_CET
+ mov eax, MSR_IA32_CET_SH_STK_EN
+ xor edx, edx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetPl0Ssp):
+ xor edx, edx
+ wrmsr
+ mov rcx, cr0
+ btr ecx, 16 ; clear WP
+ mov cr0, rcx
+ mov [eax], eax ; reload SSP, and clear busyflag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSspTable):
+ xor edx, edx
+ wrmsr
+
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSsp):
+ cmp eax, 0
+ jz CetInterruptDone
+ mov [eax], eax ; reload SSP, and clear busyflag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+CetInterruptDone:
+
+ mov rcx, cr0
+ bts ecx, 16 ; set WP
+ mov cr0, rcx
+
+ mov eax, 0x668 | CR4_CET
+ mov cr4, rax
+
+ SETSSBSY
+
+CetDone:
;
; Save FP registers
@@ -209,6 +293,31 @@ CpuSmmDebugExitAbsAddr:
add rsp, 0x200
+ mov rax, strict qword 0 ; mov rax, ASM_PFX(mCetSupported)
+mCetSupportedAbsAddr:
+ mov al, [rax]
+ cmp al, 0
+ jz CetDone2
+
+ mov eax, 0x668
+ mov cr4, rax ; disable CET
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_S_CET
+ pop rax
+ pop rdx
+ wrmsr
+CetDone2:
+
mov rax, strict qword 0 ; lea rax, [ASM_PFX(mXdSupported)]
mXdSupportedAbsAddr:
mov al, [rax]
@@ -223,6 +332,7 @@ mXdSupportedAbsAddr:
wrmsr
.1:
+
StuffRsb64
rsm
@@ -257,4 +367,8 @@ ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
lea rax, [ASM_PFX(mXdSupported)]
lea rcx, [mXdSupportedAbsAddr]
mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(mCetSupported)]
+ lea rcx, [mCetSupportedAbsAddr]
+ mov qword [rcx - 8], rax
ret
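Sketch only, not part of the patch: what the patched mov [eax], eax / mov [eax + 4], ecx sequences above do in C terms. CR0.WP is cleared around them so that an ordinary store to the read-only shadow-stack page is permitted while the supervisor shadow-stack token is rewritten; WriteShadowStackToken and its TokenAddress parameter are illustration names for the 32-bit value patched into mPatchCetPl0Ssp or mPatchCetInterruptSsp.

//
// Sketch of the token write performed by the patched assembly sequence.
//
VOID
WriteShadowStackToken (
  IN UINT32  TokenAddress
  )
{
  //
  // A supervisor shadow-stack token holds its own linear address; bit 0 is
  // the busy flag and is left clear here so SETSSBSY can later claim it.
  //
  *(volatile UINT64 *)(UINTN)TokenAddress = TokenAddress;
}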
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
index 89b3f2b725..c28c15b0ba 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for x64 arch specific.
-Copyright (c) 2015 - 2018, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -17,6 +17,18 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
EFI_PHYSICAL_ADDRESS mGdtBuffer;
UINTN mGdtBufferSize;
+extern BOOLEAN mCetSupported;
+extern UINTN mSmmShadowStackSize;
+
+X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSspTable;
+UINT32 mCetPl0Ssp;
+UINT32 mCetInterruptSsp;
+UINT32 mCetInterruptSspTable;
+
+UINTN mSmmInterruptSspTables;
+
/**
Initialize IDT for SMM Stack Guard.
@@ -153,3 +165,47 @@ TransferApToSafeState (
ASSERT (FALSE);
}
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ )
+{
+ UINTN SmmShadowStackSize;
+ UINT64 *InterruptSspTable;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
+ DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
+ DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
+ DEBUG ((DEBUG_INFO, " SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ if (mSmmInterruptSspTables == 0) {
+ mSmmInterruptSspTables = (UINTN)AllocateZeroPool(sizeof(UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+ ASSERT (mSmmInterruptSspTables != 0);
+ DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables));
+ }
+ mCetInterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64));
+ mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof(UINT64) * 8 * CpuIndex);
+ InterruptSspTable = (UINT64 *)(UINTN)mCetInterruptSspTable;
+ InterruptSspTable[1] = mCetInterruptSsp;
+ PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
+ PatchInstructionX86 (mPatchCetInterruptSspTable, mCetInterruptSspTable, 4);
+ DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
+ DEBUG ((DEBUG_INFO, "mCetInterruptSspTable - 0x%x\n", mCetInterruptSspTable));
+ }
+ }
+}
+
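A usage sketch of the new InitShadowStack() routine. SetupShadowStacks, ShadowStackBase, and SmmShadowStackSize below are illustration names, and the per-CPU buffer layout is an assumption; the actual call site is outside this diff.

//
// Hypothetical call site for illustration; in the real driver the buffers
// come out of the SMM stack allocation, which this patch does not show.
//
VOID
SetupShadowStacks (
  IN UINTN  NumberOfCpus,
  IN UINT8  *ShadowStackBase,     // base of the contiguous per-CPU region
  IN UINTN  SmmShadowStackSize    // size reserved for each CPU's shadow stack
  )
{
  UINTN  Index;

  for (Index = 0; Index < NumberOfCpus; Index++) {
    InitShadowStack (Index, ShadowStackBase + Index * SmmShadowStackSize);
  }
}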