author     Min Xu <min.m.xu@intel.com>                                     2021-07-16 16:04:11 +0800
committer  mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>   2022-04-02 08:15:12 +0000
commit     2b80269d98626271d52445641f936e4543b333a9 (patch)
tree       c147ac39d1b719f1602ade641bab4328abcccd94 /OvmfPkg/Sec
parent     ccca1c2d5d1dcd3a1535062368a8572cb6c19dc6 (diff)
OvmfPkg: Update Sec to support Tdx
RFC: https://bugzilla.tianocore.org/show_bug.cgi?id=3429

This commit contains the following major changes.

1. SecEntry.nasm
   In TDX, the BSP and the APs enter the same entry point in SecEntry.nasm.
   The BSP initializes the temporary stack and then jumps to SecMain, just as
   legacy OVMF does. The APs spin in a modified mailbox loop using the initial
   mailbox structure, whose definition is in
   OvmfPkg/Include/IndustryStandard/IntelTdx.h. Each AP waits for a command,
   checks whether the command is addressed to it, and if so executes the
   command.

2. Sec/SecMain.c
   When the host VMM creates a TD guest, the system memory information is
   stored in the TdHob, a memory region described in the TDX metadata. The
   system memory regions described in the TdHob must be accepted before they
   can be accessed, so the major task of this patch is to process the
   TdHobList and accept that memory. After that, TDVF follows the standard
   OVMF flow and jumps to the PEI phase.

PcdUse1GPageTable is set to FALSE by default in OvmfPkgX64.dsc, which leaves
Intel TDX no way to use 1G page tables. To support 1G page tables, this PCD is
set to TRUE in OvmfPkgX64.dsc.

TDX_GUEST_SUPPORTED is defined in OvmfPkgX64.dsc. This macro wraps the
TDX-specific code. TDX only works on X64, so the code is only valid in the X64
arch.

Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Jiewen Yao <jiewen.yao@intel.com>
Signed-off-by: Min Xu <min.m.xu@intel.com>
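As background for item 1 above, the initial mailbox that the parked APs poll can be pictured roughly as the structure below. This is an illustrative sketch only: the authoritative definition lives in OvmfPkg/Include/IndustryStandard/IntelTdx.h, and SecEntry.nasm addresses the fields through offsets from TdxCommondefs.inc, so the names, widths, and layout shown here are assumptions, not the real layout.

//
// Hypothetical sketch of the initial mailbox; see IntelTdx.h and
// TdxCommondefs.inc for the real definition and field offsets.
//
#include <Base.h>

typedef struct {
  volatile UINT32    Command;           // e.g. the Noop/Wakeup commands polled in .check_command
  volatile UINT32    WakeupVector;      // entry point a released AP jumps to
  volatile UINT64    RelocatedMailBox;  // address of the relocated mailbox handed to the AP
  volatile UINT32    CpuArrival;        // incremented (lock xadd) by every AP that reaches ParkAp
} TDX_MAILBOX_SKETCH;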
Diffstat (limited to 'OvmfPkg/Sec')
-rw-r--r--  OvmfPkg/Sec/SecMain.c          | 29
-rw-r--r--  OvmfPkg/Sec/SecMain.inf        |  3
-rw-r--r--  OvmfPkg/Sec/X64/SecEntry.nasm  | 82
3 files changed, 112 insertions, 2 deletions
diff --git a/OvmfPkg/Sec/SecMain.c b/OvmfPkg/Sec/SecMain.c
index 02520e25ab..ca9717a7b5 100644
--- a/OvmfPkg/Sec/SecMain.c
+++ b/OvmfPkg/Sec/SecMain.c
@@ -26,9 +26,8 @@
#include <Library/ExtractGuidedSectionLib.h>
#include <Library/LocalApicLib.h>
#include <Library/CpuExceptionHandlerLib.h>
-
#include <Ppi/TemporaryRamSupport.h>
-
+#include <Library/PlatformInitLib.h>
#include "AmdSev.h"
#define SEC_IDT_ENTRY_COUNT 34
@@ -738,6 +737,20 @@ SecCoreStartupWithStack (
UINT32 Index;
volatile UINT8 *Table;
+ #if defined (TDX_GUEST_SUPPORTED)
+ if (TdIsEnabled ()) {
+ //
+ // For Td guests, the memory map info is in TdHobLib. It should be processed
+ // first so that the memory is accepted. Otherwise, access to the unaccepted
+ // memory will trigger a triple fault.
+ //
+ if (ProcessTdxHobList () != EFI_SUCCESS) {
+ CpuDeadLoop ();
+ }
+ }
+
+ #endif
+
//
// To ensure SMM can't be compromised on S3 resume, we must force re-init of
// the BaseExtractGuidedSectionLib. Since this is before library constructors
@@ -756,6 +769,7 @@ SecCoreStartupWithStack (
// we use a loop rather than CopyMem.
//
IdtTableInStack.PeiService = NULL;
+
for (Index = 0; Index < SEC_IDT_ENTRY_COUNT; Index++) {
//
// Declare the local variables that actually move the data elements as
@@ -813,6 +827,17 @@ SecCoreStartupWithStack (
AsmEnableCache ();
}
+ #if defined (TDX_GUEST_SUPPORTED)
+ if (TdIsEnabled ()) {
+ //
+ // InitializeCpuExceptionHandlers () should be called in Td guests so that
+ // #VE exceptions can be handled correctly.
+ //
+ InitializeCpuExceptionHandlers (NULL);
+ }
+
+ #endif
+
DEBUG ((
DEBUG_INFO,
"SecCoreStartupWithStack(0x%x, 0x%x)\n",
diff --git a/OvmfPkg/Sec/SecMain.inf b/OvmfPkg/Sec/SecMain.inf
index 95cf0025e1..4b5b089ccd 100644
--- a/OvmfPkg/Sec/SecMain.inf
+++ b/OvmfPkg/Sec/SecMain.inf
@@ -77,6 +77,9 @@
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedStart
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedEnd
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdTdxAcceptPageSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase
[FeaturePcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Sec/X64/SecEntry.nasm b/OvmfPkg/Sec/X64/SecEntry.nasm
index 1cc680a707..4528fec309 100644
--- a/OvmfPkg/Sec/X64/SecEntry.nasm
+++ b/OvmfPkg/Sec/X64/SecEntry.nasm
@@ -10,12 +10,17 @@
;------------------------------------------------------------------------------
#include <Base.h>
+%include "TdxCommondefs.inc"
DEFAULT REL
SECTION .text
extern ASM_PFX(SecCoreStartupWithStack)
+%macro tdcall 0
+ db 0x66, 0x0f, 0x01, 0xcc
+%endmacro
+
;
; SecCore Entry Point
;
@@ -36,6 +41,32 @@ global ASM_PFX(_ModuleEntryPoint)
ASM_PFX(_ModuleEntryPoint):
;
+ ; Guest type is stored in OVMF_WORK_AREA
+ ;
+ %define OVMF_WORK_AREA FixedPcdGet32 (PcdOvmfWorkAreaBase)
+ %define VM_GUEST_TYPE_TDX 2
+ mov eax, OVMF_WORK_AREA
+ cmp byte[eax], VM_GUEST_TYPE_TDX
+ jne InitStack
+
+ mov rax, TDCALL_TDINFO
+ tdcall
+
+ ;
+ ; R8 [31:0] NUM_VCPUS
+ ; [63:32] MAX_VCPUS
+ ; R9 [31:0] VCPU_INDEX
+ ; The Td guest sets VCPU0 as the BSP; the others are APs.
+ ; APs jump to the spin loop and get released by DXE's MpInitLib.
+ ;
+ mov rax, r9
+ and rax, 0xffff
+ test rax, rax
+ jne ParkAp
+
+InitStack:
+
+ ;
; Fill the temporary RAM with the initial stack value.
; The loop below will seed the heap as well, but that's harmless.
;
@@ -67,3 +98,54 @@ ASM_PFX(_ModuleEntryPoint):
sub rsp, 0x20
call ASM_PFX(SecCoreStartupWithStack)
+ ;
+ ; Note: BSP never gets here. APs will be unblocked by DXE
+ ;
+ ; R8 [31:0] NUM_VCPUS
+ ; [63:32] MAX_VCPUS
+ ; R9 [31:0] VCPU_INDEX
+ ;
+ParkAp:
+
+ mov rbp, r9
+
+.do_wait_loop:
+ mov rsp, FixedPcdGet32 (PcdOvmfSecGhcbBackupBase)
+
+ ;
+ ; register itself in [rsp + CpuArrivalOffset]
+ ;
+ mov rax, 1
+ lock xadd dword [rsp + CpuArrivalOffset], eax
+ inc eax
+
+.check_arrival_cnt:
+ cmp eax, r8d
+ je .check_command
+ mov eax, dword[rsp + CpuArrivalOffset]
+ jmp .check_arrival_cnt
+
+.check_command:
+ mov eax, dword[rsp + CommandOffset]
+ cmp eax, MpProtectedModeWakeupCommandNoop
+ je .check_command
+
+ cmp eax, MpProtectedModeWakeupCommandWakeup
+ je .do_wakeup
+
+ ; Don't support this command, so ignore
+ jmp .check_command
+
+.do_wakeup:
+ ;
+ ; BSP sets these variables before unblocking APs
+ ; RAX: WakeupVectorOffset
+ ; RBX: Relocated mailbox address
+ ; RBP: vCpuId
+ ;
+ mov rax, 0
+ mov eax, dword[rsp + WakeupVectorOffset]
+ mov rbx, [rsp + WakeupArgsRelocatedMailBox]
+ nop
+ jmp rax
+ jmp $
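The ParkAp flow above is only the AP half of the handshake; the BSP half, which fills in the wakeup vector and the relocated mailbox address and then issues the wakeup command, happens much later in MpInitLib during DXE. Below is a hedged sketch of that counterpart. It repeats the hypothetical TDX_MAILBOX_SKETCH layout from the earlier sketch, and SKETCH_WAKEUP_COMMAND_WAKEUP is a placeholder value, not the real MpProtectedModeWakeupCommandWakeup encoding.

#include <Base.h>

#define SKETCH_WAKEUP_COMMAND_WAKEUP  1   // placeholder, not the real command encoding

typedef struct {                          // same hypothetical layout as the earlier sketch
  volatile UINT32    Command;
  volatile UINT32    WakeupVector;
  volatile UINT64    RelocatedMailBox;
  volatile UINT32    CpuArrival;
} TDX_MAILBOX_SKETCH;

VOID
SketchReleaseParkedAp (
  IN TDX_MAILBOX_SKETCH  *MailBox,
  IN UINT32              ApEntryPoint,
  IN UINT64              RelocatedMailBoxAddress
  )
{
  //
  // Publish the arguments before the command: a parked AP polls Command and
  // reads WakeupVector and RelocatedMailBox only after it observes the
  // wakeup command, then jumps to the vector.
  //
  MailBox->WakeupVector     = ApEntryPoint;
  MailBox->RelocatedMailBox = RelocatedMailBoxAddress;
  MailBox->Command          = SKETCH_WAKEUP_COMMAND_WAKEUP;
}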