author     Harry Liebel <Harry.Liebel@arm.com>    2013-07-18 18:07:46 +0000
committer  oliviermartin <oliviermartin@6f19259b-4bc3-4df7-8a09-765794883524>    2013-07-18 18:07:46 +0000
commit     25402f5d0660acde3ee382a36b065945251990dc (patch)
tree       293fd0d8b229479d4d7cd540e034a10e08c18058 /ArmPkg/Drivers/CpuDxe/AArch64
parent     8477cb6e159008d1703381b9f6159e8c90ccc2bf (diff)
ArmPkg: Added Aarch64 support
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Harry Liebel <Harry.Liebel@arm.com>
Signed-off-by: Olivier Martin <olivier.martin@arm.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@14486 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'ArmPkg/Drivers/CpuDxe/AArch64')
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/Exception.c          153
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S   381
-rw-r--r--  ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c                189
3 files changed, 723 insertions, 0 deletions
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c b/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c
new file mode 100644
index 0000000000..1aac6a5a91
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/Exception.c
@@ -0,0 +1,153 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "CpuDxe.h"
+
+#include <Chipset/AArch64.h>
+
+VOID
+ExceptionHandlersStart (
+ VOID
+ );
+
+VOID
+ExceptionHandlersEnd (
+ VOID
+ );
+
+VOID
+CommonExceptionEntry (
+ VOID
+ );
+
+VOID
+AsmCommonExceptionEntry (
+ VOID
+ );
+
+
+EFI_EXCEPTION_CALLBACK gExceptionHandlers[MAX_ARM_EXCEPTION + 1];
+EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[MAX_ARM_EXCEPTION + 1];
+
+
+
+/**
+ This function registers and enables the handler specified by InterruptHandler for a processor
+ interrupt or exception type specified by InterruptType. If InterruptHandler is NULL, then the
+ handler for the processor interrupt or exception type specified by InterruptType is uninstalled.
+ The installed handler is called once for each processor interrupt or exception.
+
+ @param InterruptType A value of type EFI_EXCEPTION_TYPE that defines which processor
+ interrupt or exception to hook.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER that is called
+ when a processor interrupt occurs. If this parameter is NULL, then the handler
+ will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was
+ previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not
+ previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+RegisterInterruptHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ )
+{
+ if (InterruptType > MAX_ARM_EXCEPTION) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if ((InterruptHandler != NULL) && (gExceptionHandlers[InterruptType] != NULL)) {
+ return EFI_ALREADY_STARTED;
+ }
+
+ gExceptionHandlers[InterruptType] = InterruptHandler;
+
+ return EFI_SUCCESS;
+}
+
+
+
+VOID
+EFIAPI
+CommonCExceptionHandler (
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ if (ExceptionType <= MAX_AARCH64_EXCEPTION) {
+ if (gExceptionHandlers[ExceptionType]) {
+ gExceptionHandlers[ExceptionType] (ExceptionType, SystemContext);
+ return;
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Unknown exception type %d from %016lx\n", ExceptionType, SystemContext.SystemContextAArch64->ELR));
+ ASSERT (FALSE);
+ }
+
+ DefaultExceptionHandler (ExceptionType, SystemContext);
+}
+
+
+
+EFI_STATUS
+InitializeExceptions (
+ IN EFI_CPU_ARCH_PROTOCOL *Cpu
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IrqEnabled;
+ BOOLEAN FiqEnabled;
+
+ Status = EFI_SUCCESS;
+ ZeroMem (gExceptionHandlers, sizeof (gExceptionHandlers));
+
+ //
+ // Disable interrupts
+ //
+ Cpu->GetInterruptState (Cpu, &IrqEnabled);
+ Cpu->DisableInterrupt (Cpu);
+
+ //
+ // EFI does not use the FIQ, but a debugger might, so we must disable
+ // it while we take over the exception vectors.
+ //
+ FiqEnabled = ArmGetFiqState ();
+ ArmDisableFiq ();
+
+ // The AArch64 Vector table must be 2KB aligned (bottom 11 bits zero)
+ //DEBUG ((EFI_D_ERROR, "vbar set addr: 0x%016lx\n",(UINTN)ExceptionHandlersStart));
+ //ASSERT(((UINTN)ExceptionHandlersStart & ((1 << 11)-1)) == 0);
+
+ // We do not copy the exception table to PcdGet32 (PcdCpuVectorBaseAddress); we just point the Vector Base Address register at the CpuDxe code.
+ ArmWriteVBar ((UINTN)ExceptionHandlersStart);
+
+ if (FiqEnabled) {
+ ArmEnableFiq ();
+ }
+
+ if (IrqEnabled) {
+ //
+ // Restore interrupt state
+ //
+ Status = Cpu->EnableInterrupt (Cpu);
+ }
+
+ return Status;
+}
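For context, the table-driven dispatch above is what backs the CPU Architectural Protocol's RegisterInterruptHandler() service once CpuDxe publishes gEfiCpuArchProtocolGuid. The following sketch is illustrative only (ExampleIrqHandler and ExampleHookIrq are hypothetical names, and the flow assumes the standard EFI_CPU_ARCH_PROTOCOL and EFI_SYSTEM_CONTEXT definitions); it shows how another DXE driver would typically hook the AArch64 IRQ vector:

#include <Uefi.h>
#include <Library/DebugLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/Cpu.h>
#include <Chipset/AArch64.h>            // EXCEPT_AARCH64_IRQ, as used by ExceptionSupport.S

STATIC
VOID
EFIAPI
ExampleIrqHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  // Illustrative only: report where the interrupt was taken from.
  DEBUG ((EFI_D_INFO, "IRQ taken, ELR=0x%016lx\n",
    SystemContext.SystemContextAArch64->ELR));
}

EFI_STATUS
ExampleHookIrq (
  VOID
  )
{
  EFI_STATUS             Status;
  EFI_CPU_ARCH_PROTOCOL  *Cpu;

  Status = gBS->LocateProtocol (&gEfiCpuArchProtocolGuid, NULL, (VOID **)&Cpu);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  // Routed to RegisterInterruptHandler() above: a second registration for the
  // same vector fails with EFI_ALREADY_STARTED, and passing NULL uninstalls.
  return Cpu->RegisterInterruptHandler (Cpu, EXCEPT_AARCH64_IRQ, ExampleIrqHandler);
}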
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S b/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S
new file mode 100644
index 0000000000..981ffd5c3c
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/ExceptionSupport.S
@@ -0,0 +1,381 @@
+//
+// Copyright (c) 2011 - 2013 ARM LTD. All rights reserved.<BR>
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//------------------------------------------------------------------------------
+
+#include <Library/PcdLib.h>
+#include <AsmMacroIoLibV8.h>
+
+/*
+ This is the stack constructed by the exception handler (low address to high address).
+ X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.
+
+ UINT64 X0; 0x000
+ UINT64 X1; 0x008
+ UINT64 X2; 0x010
+ UINT64 X3; 0x018
+ UINT64 X4; 0x020
+ UINT64 X5; 0x028
+ UINT64 X6; 0x030
+ UINT64 X7; 0x038
+ UINT64 X8; 0x040
+ UINT64 X9; 0x048
+ UINT64 X10; 0x050
+ UINT64 X11; 0x058
+ UINT64 X12; 0x060
+ UINT64 X13; 0x068
+ UINT64 X14; 0x070
+ UINT64 X15; 0x078
+ UINT64 X16; 0x080
+ UINT64 X17; 0x088
+ UINT64 X18; 0x090
+ UINT64 X19; 0x098
+ UINT64 X20; 0x0a0
+ UINT64 X21; 0x0a8
+ UINT64 X22; 0x0b0
+ UINT64 X23; 0x0b8
+ UINT64 X24; 0x0c0
+ UINT64 X25; 0x0c8
+ UINT64 X26; 0x0d0
+ UINT64 X27; 0x0d8
+ UINT64 X28; 0x0e0
+ UINT64 FP; 0x0e8 // x29 - Frame Pointer
+ UINT64 LR; 0x0f0 // x30 - Link Register
+ UINT64 SP; 0x0f8 // x31 - Stack Pointer
+
+ // FP/SIMD Registers. 128bit if used as Q-regs.
+ UINT64 V0[2]; 0x100
+ UINT64 V1[2]; 0x110
+ UINT64 V2[2]; 0x120
+ UINT64 V3[2]; 0x130
+ UINT64 V4[2]; 0x140
+ UINT64 V5[2]; 0x150
+ UINT64 V6[2]; 0x160
+ UINT64 V7[2]; 0x170
+ UINT64 V8[2]; 0x180
+ UINT64 V9[2]; 0x190
+ UINT64 V10[2]; 0x1a0
+ UINT64 V11[2]; 0x1b0
+ UINT64 V12[2]; 0x1c0
+ UINT64 V13[2]; 0x1d0
+ UINT64 V14[2]; 0x1e0
+ UINT64 V15[2]; 0x1f0
+ UINT64 V16[2]; 0x200
+ UINT64 V17[2]; 0x210
+ UINT64 V18[2]; 0x220
+ UINT64 V19[2]; 0x230
+ UINT64 V20[2]; 0x240
+ UINT64 V21[2]; 0x250
+ UINT64 V22[2]; 0x260
+ UINT64 V23[2]; 0x270
+ UINT64 V24[2]; 0x280
+ UINT64 V25[2]; 0x290
+ UINT64 V26[2]; 0x2a0
+ UINT64 V27[2]; 0x2b0
+ UINT64 V28[2]; 0x2c0
+ UINT64 V29[2]; 0x2d0
+ UINT64 V30[2]; 0x2e0
+ UINT64 V31[2]; 0x2f0
+
+ // System Context
+ UINT64 ELR; 0x300 // Exception Link Register
+ UINT64 SPSR; 0x308 // Saved Processor Status Register
+ UINT64 FPSR; 0x310 // Floating Point Status Register
+ UINT64 ESR; 0x318 // EL1 Exception Syndrome Register
+ UINT64 FAR; 0x320 // EL1 Fault Address Register
+ UINT64 Padding; 0x328 // Required for stack alignment
+*/
+
+ASM_GLOBAL ASM_PFX(ExceptionHandlersStart)
+ASM_GLOBAL ASM_PFX(ExceptionHandlersEnd)
+ASM_GLOBAL ASM_PFX(CommonExceptionEntry)
+ASM_GLOBAL ASM_PFX(AsmCommonExceptionEntry)
+ASM_GLOBAL ASM_PFX(CommonCExceptionHandler)
+
+.text
+.align 11
+
+#define GP_CONTEXT_SIZE (32 * 8)
+#define FP_CONTEXT_SIZE (32 * 16)
+#define SYS_CONTEXT_SIZE ( 6 * 8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)
+
+// Cannot str x31 directly
+#define ALL_GP_REGS \
+ REG_PAIR (x0, x1, 0x000, GP_CONTEXT_SIZE); \
+ REG_PAIR (x2, x3, 0x010, GP_CONTEXT_SIZE); \
+ REG_PAIR (x4, x5, 0x020, GP_CONTEXT_SIZE); \
+ REG_PAIR (x6, x7, 0x030, GP_CONTEXT_SIZE); \
+ REG_PAIR (x8, x9, 0x040, GP_CONTEXT_SIZE); \
+ REG_PAIR (x10, x11, 0x050, GP_CONTEXT_SIZE); \
+ REG_PAIR (x12, x13, 0x060, GP_CONTEXT_SIZE); \
+ REG_PAIR (x14, x15, 0x070, GP_CONTEXT_SIZE); \
+ REG_PAIR (x16, x17, 0x080, GP_CONTEXT_SIZE); \
+ REG_PAIR (x18, x19, 0x090, GP_CONTEXT_SIZE); \
+ REG_PAIR (x20, x21, 0x0a0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x22, x23, 0x0b0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x24, x25, 0x0c0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x26, x27, 0x0d0, GP_CONTEXT_SIZE); \
+ REG_PAIR (x28, x29, 0x0e0, GP_CONTEXT_SIZE); \
+ REG_ONE (x30, 0x0f0, GP_CONTEXT_SIZE);
+
+// In order to save the SP we need to put it somewhere else first.
+// STR only works with XZR/WZR directly
+#define SAVE_SP \
+ add x1, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE; \
+ REG_ONE (x1, 0x0f8, GP_CONTEXT_SIZE);
+
+#define ALL_FP_REGS \
+ REG_PAIR (q0, q1, 0x000, FP_CONTEXT_SIZE); \
+ REG_PAIR (q2, q3, 0x020, FP_CONTEXT_SIZE); \
+ REG_PAIR (q4, q5, 0x040, FP_CONTEXT_SIZE); \
+ REG_PAIR (q6, q7, 0x060, FP_CONTEXT_SIZE); \
+ REG_PAIR (q8, q9, 0x080, FP_CONTEXT_SIZE); \
+ REG_PAIR (q10, q11, 0x0a0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q12, q13, 0x0c0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q14, q15, 0x0e0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q16, q17, 0x100, FP_CONTEXT_SIZE); \
+ REG_PAIR (q18, q19, 0x120, FP_CONTEXT_SIZE); \
+ REG_PAIR (q20, q21, 0x140, FP_CONTEXT_SIZE); \
+ REG_PAIR (q22, q23, 0x160, FP_CONTEXT_SIZE); \
+ REG_PAIR (q24, q25, 0x180, FP_CONTEXT_SIZE); \
+ REG_PAIR (q26, q27, 0x1a0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q28, q29, 0x1c0, FP_CONTEXT_SIZE); \
+ REG_PAIR (q30, q31, 0x1e0, FP_CONTEXT_SIZE);
+
+#define ALL_SYS_REGS \
+ REG_PAIR (x1, x2, 0x000, SYS_CONTEXT_SIZE); \
+ REG_PAIR (x3, x4, 0x010, SYS_CONTEXT_SIZE); \
+ REG_ONE (x5, 0x020, SYS_CONTEXT_SIZE);
+
+//
+// Exception vector table. VBAR is set to point directly at ExceptionHandlersStart,
+// so the region ExceptionHandlersStart - ExceptionHandlersEnd is used in place (not copied).
+//
+ASM_PFX(ExceptionHandlersStart):
+
+//
+// Current EL with SP0 : 0x0 - 0x180
+//
+.align 7
+ASM_PFX(SynchronousExceptionSP0):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqSP0):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqSP0):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorSP0):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Current EL with SPx: 0x200 - 0x380
+//
+.align 7
+ASM_PFX(SynchronousExceptionSPx):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqSPx):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqSPx):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorSPx):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Lower EL using AArch64 : 0x400 - 0x580
+//
+.align 7
+ASM_PFX(SynchronousExceptionA64):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqA64):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqA64):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorA64):
+ b ASM_PFX(SErrorEntry)
+
+//
+// Lower EL using AArch32 : 0x600 - 0x780
+//
+.align 7
+ASM_PFX(SynchronousExceptionA32):
+ b ASM_PFX(SynchronousExceptionEntry)
+
+.align 7
+ASM_PFX(IrqA32):
+ b ASM_PFX(IrqEntry)
+
+.align 7
+ASM_PFX(FiqA32):
+ b ASM_PFX(FiqEntry)
+
+.align 7
+ASM_PFX(SErrorA32):
+ b ASM_PFX(SErrorEntry)
+
+
+#undef REG_PAIR
+#undef REG_ONE
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) stp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) str REG1, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+ASM_PFX(SynchronousExceptionEntry):
+ // Move the stack pointer so we can reach our structure with the str instruction.
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+
+ // Save all the general-purpose regs before touching x0 and x1.
+ // This does not save x31 (SP) as it is special. We do that later.
+ ALL_GP_REGS
+
+ // Record the type of exception that occurred.
+ mov x0, #EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS
+
+ // Jump to our general handler to deal with all the common parts and process the exception.
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(IrqEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_IRQ
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(FiqEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_FIQ
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+ASM_PFX(SErrorEntry):
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+ ALL_GP_REGS
+ mov x0, #EXCEPT_AARCH64_SERROR
+ ldr x1, ASM_PFX(CommonExceptionEntry)
+ br x1
+
+
+//
+// Literal pool holding the address of AsmCommonExceptionEntry; loaded by the entry stubs above
+//
+.align 3
+ASM_PFX(CommonExceptionEntry):
+ .dword ASM_PFX(AsmCommonExceptionEntry)
+
+ASM_PFX(ExceptionHandlersEnd):
+
+
+
+//
+// This code runs from the CpuDxe driver's loaded address; its address is stored
+// in CommonExceptionEntry above and is reached from the vector stubs via br x1.
+//
+ASM_PFX(AsmCommonExceptionEntry):
+ /* NOTE:
+ We have to break up the save code because the immediate value to be used
+ with the SP is too big to do it all in one step, so we need to shuffle the SP
+ along as we go. (We only have 9 bits of immediate to work with.) */
+
+ // Save the current Stack pointer before we start modifying it.
+ SAVE_SP
+
+ // Read the exception context from the system registers of the EL we are running at (EL1 or EL2).
+ EL1_OR_EL2(x1)
+1:mrs x1, elr_el1 // Exception Link Register
+ mrs x2, spsr_el1 // Saved Processor Status Register 32bit
+ mrs x3, fpsr // Floating point Status Register 32bit
+ mrs x4, esr_el1 // EL1 Exception syndrome register 32bit
+ mrs x5, far_el1 // EL1 Fault Address Register
+ b 3f
+
+2:mrs x1, elr_el2 // Exception Link Register
+ mrs x2, spsr_el2 // Saved Processor Status Register 32bit
+ mrs x3, fpsr // Floating point Status Register 32bit
+ mrs x4, esr_el2 // EL2 Exception syndrome register 32bit
+ mrs x5, far_el2 // EL2 Fault Address Register
+
+ // Adjust SP to save next set
+3:add sp, sp, FP_CONTEXT_SIZE
+
+ // Push FP regs to Stack.
+ ALL_FP_REGS
+
+ // Adjust SP to save next set
+ add sp, sp, SYS_CONTEXT_SIZE
+
+ // Save the SYS regs
+ ALL_SYS_REGS
+
+ // Point to top of struct after all regs saved
+ sub sp, sp, GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+
+ // x0 still holds the exception type.
+ // Set x1 to point to the top of our struct on the Stack
+ mov x1, sp
+
+// CommonCExceptionHandler (
+// IN EFI_EXCEPTION_TYPE ExceptionType, x0
+// IN OUT EFI_SYSTEM_CONTEXT SystemContext x1
+// )
+
+ // Call the handler as defined above
+
+ // For now we spin in the handler if we received an abort of some kind.
+ // We do not try to recover.
+ bl ASM_PFX(CommonCExceptionHandler) // Call exception handler
+
+
+// Defines for popping from stack
+
+#undef REG_PAIR
+#undef REG_ONE
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) ldp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) ldr REG1, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+
+ // pop all regs and return from exception.
+ add sp, sp, GP_CONTEXT_SIZE
+ ALL_GP_REGS
+
+ // Adjust SP to pop next set
+ add sp, sp, FP_CONTEXT_SIZE
+ // Pop FP regs from the stack.
+ ALL_FP_REGS
+
+ // Adjust SP to be where we started from when we came into the handler.
+ // The handler cannot change the SP.
+ add sp, sp, SYS_CONTEXT_SIZE
+
+ eret
+
+#undef REG_PAIR
+#undef REG_ONE
+
+dead:
+ b dead
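The frame constructed above is passed to CommonCExceptionHandler() as an EFI_SYSTEM_CONTEXT whose AArch64 view mirrors the offsets documented at the top of this file. As a hedged illustration (ExampleSyncHandler is a hypothetical name, and it assumes EFI_SYSTEM_CONTEXT_AARCH64 from MdePkg's DebugSupport.h matches that layout), a handler registered for EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS could decode the saved link register, syndrome and fault address like this:

#include <Uefi.h>
#include <Library/DebugLib.h>
#include <Protocol/DebugSupport.h>      // EFI_SYSTEM_CONTEXT_AARCH64

STATIC
VOID
EFIAPI
ExampleSyncHandler (
  IN     EFI_EXCEPTION_TYPE   ExceptionType,
  IN OUT EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  EFI_SYSTEM_CONTEXT_AARCH64  *Ctx;

  Ctx = SystemContext.SystemContextAArch64;

  // ELR: address the exception returns to; ESR: syndrome (exception class in
  // bits [31:26]); FAR: faulting address for aborts. These correspond to the
  // 0x300, 0x318 and 0x320 offsets in the frame laid out above.
  DEBUG ((EFI_D_ERROR, "Sync exception %d: ELR=0x%016lx ESR=0x%lx FAR=0x%016lx\n",
    ExceptionType, Ctx->ELR, Ctx->ESR, Ctx->FAR));
}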
diff --git a/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c b/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c
new file mode 100644
index 0000000000..9043afaffa
--- /dev/null
+++ b/ArmPkg/Drivers/CpuDxe/AArch64/Mmu.c
@@ -0,0 +1,189 @@
+/*++
+
+Copyright (c) 2009, Hewlett-Packard Company. All rights reserved.<BR>
+Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>
+Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>
+
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+
+--*/
+
+#include "CpuDxe.h"
+
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+
+STATIC
+UINT64
+GetFirstPageAttribute (
+ IN UINT64 *FirstLevelTableAddress,
+ IN UINTN TableLevel
+ )
+{
+ UINT64 FirstEntry;
+
+ // Get the first entry of the table
+ FirstEntry = *FirstLevelTableAddress;
+
+ if ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+ // Only valid for Levels 0, 1 and 2
+ ASSERT (TableLevel < 3);
+
+ // Get the attribute of the subsequent table
+ return GetFirstPageAttribute ((UINT64*)(FirstEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE), TableLevel + 1);
+ } else if (((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) ||
+ ((TableLevel == 3) && ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3)))
+ {
+ return FirstEntry & TT_ATTR_INDX_MASK;
+ } else {
+ return TT_ATTR_INDX_INVALID;
+ }
+}
+
+STATIC
+UINT64
+GetNextEntryAttribute (
+ IN UINT64 *TableAddress,
+ IN UINTN EntryCount,
+ IN UINTN TableLevel,
+ IN UINT64 BaseAddress,
+ IN OUT UINT32 *PrevEntryAttribute,
+ IN OUT UINT64 *StartGcdRegion
+ )
+{
+ UINTN Index;
+ UINT64 Entry;
+ UINT32 EntryAttribute;
+ UINT32 EntryType;
+ EFI_STATUS Status;
+ UINTN NumberOfDescriptors;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+
+ // Get the memory space map from GCD
+ MemorySpaceMap = NULL;
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+ ASSERT_EFI_ERROR (Status);
+
+ // We cannot get more than 3-level page table
+ ASSERT (TableLevel <= 3);
+
+ // While the top-level table might not contain TT_ENTRY_COUNT entries,
+ // the subsequent ones should be fully populated
+ for (Index = 0; Index < EntryCount; Index++) {
+ Entry = TableAddress[Index];
+ EntryType = Entry & TT_TYPE_MASK;
+ EntryAttribute = Entry & TT_ATTR_INDX_MASK;
+
+ // If the Entry is a Block descriptor (or a Level-3 Page descriptor) then compare its attributes with the current region
+ if ((EntryType == TT_TYPE_BLOCK_ENTRY) ||
+ ((TableLevel == 3) && (EntryType == TT_TYPE_BLOCK_ENTRY_LEVEL3))) {
+ if ((*PrevEntryAttribute == TT_ATTR_INDX_INVALID) || (EntryAttribute != *PrevEntryAttribute)) {
+ if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ *StartGcdRegion,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel)) - 1) - *StartGcdRegion,
+ PageAttributeToGcdAttribute (EntryAttribute));
+ }
+
+ // Start of the new region
+ *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+ *PrevEntryAttribute = EntryAttribute;
+ } else {
+ continue;
+ }
+ } else if (EntryType == TT_TYPE_TABLE_ENTRY) {
+ // Table Entry type is only valid for Level 0, 1, 2
+ ASSERT (TableLevel < 3);
+
+ // Increase the level number and scan the sub-level table
+ GetNextEntryAttribute ((UINT64*)(Entry & TT_ADDRESS_MASK_DESCRIPTION_TABLE),
+ TT_ENTRY_COUNT, TableLevel + 1,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel))),
+ PrevEntryAttribute, StartGcdRegion);
+ } else {
+ if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ *StartGcdRegion,
+ (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel)) - 1) - *StartGcdRegion,
+ PageAttributeToGcdAttribute (*PrevEntryAttribute));
+
+ // Start of the new region
+ *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+ *PrevEntryAttribute = TT_ATTR_INDX_INVALID;
+ }
+ }
+ }
+
+ return BaseAddress + (EntryCount * TT_ADDRESS_AT_LEVEL(TableLevel));
+}
+
+EFI_STATUS
+SyncCacheConfig (
+ IN EFI_CPU_ARCH_PROTOCOL *CpuProtocol
+ )
+{
+ EFI_STATUS Status;
+ UINT32 PageAttribute = 0;
+ UINT64 *FirstLevelTableAddress;
+ UINTN TableLevel;
+ UINTN TableCount;
+ UINTN NumberOfDescriptors;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+ UINTN Tcr;
+ UINTN T0SZ;
+ UINT64 BaseAddressGcdRegion;
+ UINT64 EndAddressGcdRegion;
+
+ // This code assumes the MMU is enabled and the translation tables are filled with valid entries
+ ASSERT (ArmMmuEnabled ());
+
+ //
+ // Get the memory space map from GCD
+ //
+ MemorySpaceMap = NULL;
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+ ASSERT_EFI_ERROR (Status);
+
+ // The GCD implementation maintains its own copy of the state of memory space attributes. GCD needs
+ // to know what the initial memory space attributes are. The CPU Arch. Protocol does not provide a
+ // GetMemoryAttributes function for GCD to get this so we must resort to calling GCD (as if we were
+ // a client) to update its copy of the attributes. This is bad architecture and should be replaced
+ // with a way for GCD to query the CPU Arch. driver of the existing memory space attributes instead.
+
+ // Obtain page table base
+ FirstLevelTableAddress = (UINT64*)(ArmGetTTBR0BaseAddress ());
+
+ // Get Translation Control Register value
+ Tcr = ArmGetTCR ();
+ // Get Address Region Size
+ T0SZ = Tcr & TCR_T0SZ_MASK;
+
+ // Get the level of the first table for the indicated Address Region Size
+ GetRootTranslationTableInfo (T0SZ, &TableLevel, &TableCount);
+
+ // First Attribute of the Page Tables
+ PageAttribute = GetFirstPageAttribute (FirstLevelTableAddress, TableLevel);
+
+ // We scan from the start of the memory map (ie: at the address 0x0)
+ BaseAddressGcdRegion = 0x0;
+ EndAddressGcdRegion = GetNextEntryAttribute (FirstLevelTableAddress,
+ TableCount, TableLevel,
+ BaseAddressGcdRegion,
+ &PageAttribute, &BaseAddressGcdRegion);
+
+ // Update GCD with the last region
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+ BaseAddressGcdRegion,
+ EndAddressGcdRegion - BaseAddressGcdRegion,
+ PageAttributeToGcdAttribute (PageAttribute));
+
+ return EFI_SUCCESS;
+}
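SyncCacheConfig() exists so that the GCD view of memory matches what the MMU is actually doing: it walks the live translation tables and pushes the derived cacheability attributes into GCD. A hedged sketch of a client reading those attributes back afterwards (ExampleQueryRegionAttributes is a hypothetical name; the DXE GCD service itself is standard):

#include <PiDxe.h>
#include <Library/DebugLib.h>
#include <Library/DxeServicesTableLib.h>

EFI_STATUS
ExampleQueryRegionAttributes (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  EFI_STATUS                       Status;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  Descriptor;

  // Returns the GCD descriptor covering Address; after SyncCacheConfig() the
  // Attributes field should carry the EFI_MEMORY_UC/WC/WT/WB bits derived by
  // PageAttributeToGcdAttribute() from the translation tables.
  Status = gDS->GetMemorySpaceDescriptor (Address, &Descriptor);
  if (!EFI_ERROR (Status)) {
    DEBUG ((EFI_D_INFO, "Region 0x%lx - 0x%lx attributes: 0x%lx\n",
      Descriptor.BaseAddress,
      Descriptor.BaseAddress + Descriptor.Length - 1,
      Descriptor.Attributes));
  }
  return Status;
}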