summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPatrick Rudolph <siro@das-labor.org>2018-12-09 10:48:59 +0100
committerPatrick Rudolph <siro@das-labor.org>2020-08-19 09:06:43 +0000
commite563815e059ef5881a42e6f8b37094783771d5a7 (patch)
tree7641211d03c315d37176717cbf37588ec4ebd38b
parentad7b2e23ab5954f150a4b2f62378f1e7133e56c9 (diff)
downloadcoreboot-e563815e059ef5881a42e6f8b37094783771d5a7.tar.gz
coreboot-e563815e059ef5881a42e6f8b37094783771d5a7.tar.bz2
coreboot-e563815e059ef5881a42e6f8b37094783771d5a7.zip
arch/x86/boot: Jump to payload in protected mode
* On ARCH_RAMSTAGE_X86_64 jump to the payload in protected mode.
* Add a helper function to jump to arbitrary code in protected mode,
  similar to the real mode call handler.
* Doesn't affect existing x86_32 code.
* Add a macro to cast a pointer to uint32_t that dies if it would
  overflow on conversion.

Tested on QEMU Q35 using SeaBIOS as payload.
Tested on Lenovo T410 with additional x86_64 patches.

Change-Id: I6552ac30f1b6205e08e16d251328e01ce3fbfd14
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/30118
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
-rw-r--r--Documentation/arch/x86/index.md4
-rw-r--r--src/arch/x86/Makefile.inc1
-rw-r--r--src/arch/x86/boot.c10
-rw-r--r--src/arch/x86/c_exit.S38
-rw-r--r--src/arch/x86/include/arch/boot/boot.h11
-rw-r--r--src/include/assert.h11
6 files changed, 74 insertions, 1 deletions
diff --git a/Documentation/arch/x86/index.md b/Documentation/arch/x86/index.md
index 81eb51925a1c..7b9e1fcfa011 100644
--- a/Documentation/arch/x86/index.md
+++ b/Documentation/arch/x86/index.md
@@ -15,6 +15,8 @@ In order to add support for x86_64 the following assumptions are made:
* The high dword of pointers is always zero
* The reference implementation is qemu
* The CPU supports 1GiB hugepages
+* x86 payloads are loaded below 4GiB in physical memory and are jumped
+ to in *protected mode*
## Assumptions for all stages using the reference implementation
* 0-4GiB are identity mapped using 2MiB-pages as WB
@@ -47,7 +49,7 @@ At the moment *$n* is 4, which results in identity mapping the lower 4 GiB.
* Add assembly code for long mode - *DONE*
* Add assembly code for SMM - *DONE*
* Add assembly code for postcar stage - *DONE*
-* Add assembly code to return to protected mode - *TODO*
+* Add assembly code to return to protected mode - *DONE*
* Implement reference code for mainboard `emulation/qemu-q35` - *TODO*
## Future work
diff --git a/src/arch/x86/Makefile.inc b/src/arch/x86/Makefile.inc
index 5bba47f0b5e0..a5c330905a8a 100644
--- a/src/arch/x86/Makefile.inc
+++ b/src/arch/x86/Makefile.inc
@@ -245,6 +245,7 @@ ramstage-$(CONFIG_ACPI_BERT) += acpi_bert_storage.c
ramstage-y += boot.c
ramstage-y += post.c
ramstage-y += c_start.S
+ramstage-y += c_exit.S
ramstage-y += cpu.c
ramstage-y += cpu_common.c
ramstage-y += ebda.c
diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
index db9d69e74daf..777a0b7d904a 100644
--- a/src/arch/x86/boot.c
+++ b/src/arch/x86/boot.c
@@ -1,10 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <arch/boot/boot.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <program_loading.h>
#include <ip_checksum.h>
#include <symbols.h>
+#include <assert.h>
int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
{
@@ -19,6 +21,13 @@ int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
void arch_prog_run(struct prog *prog)
{
+#if ENV_RAMSTAGE && defined(__x86_64__)
+ const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog));
+ const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
+
+ /* On x86 coreboot payloads expect to be called in protected mode */
+ protected_mode_jump(entry, arg);
+#else
#ifdef __x86_64__
void (*doit)(void *arg);
#else
@@ -27,4 +36,5 @@ void arch_prog_run(struct prog *prog)
#endif
doit = prog_entry(prog);
doit(prog_entry_arg(prog));
+#endif
}
diff --git a/src/arch/x86/c_exit.S b/src/arch/x86/c_exit.S
new file mode 100644
index 000000000000..e5b9bf8d74a6
--- /dev/null
+++ b/src/arch/x86/c_exit.S
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <arch/ram_segs.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/cr.h>
+
+
+#ifdef __x86_64__
+
+ /*
+ * Functions to handle mode switches from long mode to protected
+ * mode.
+ */
+.text
+.code64
+ .section ".text.protected_mode_jump", "ax", @progbits
+ .globl protected_mode_jump
+protected_mode_jump:
+
+ push %rbp
+ mov %rsp, %rbp
+
+ /* Arguments to stack */
+ push %rdi
+ push %rsi
+
+ #include <cpu/x86/64bit/exit32.inc>
+
+ movl -8(%ebp), %eax /* Function to call */
+ movl -16(%ebp), %ebx /* Argument 0 */
+
+ /* Align the stack */
+ andl $0xFFFFFFF0, %esp
+ subl $12, %esp
+ pushl %ebx /* Argument 0 */
+
+ jmp *%eax
+#endif
diff --git a/src/arch/x86/include/arch/boot/boot.h b/src/arch/x86/include/arch/boot/boot.h
index c73591579a6a..1ef927e0fb48 100644
--- a/src/arch/x86/include/arch/boot/boot.h
+++ b/src/arch/x86/include/arch/boot/boot.h
@@ -7,4 +7,15 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
+#include <types.h>
+/*
+ * Jump to function in protected mode.
+ * @arg func_ptr Function to jump to in protected mode
+ * @arg Argument to pass to called function
+ *
+ * @noreturn
+ */
+void protected_mode_jump(uint32_t func_ptr,
+ uint32_t argument);
+
#endif /* ASM_I386_BOOT_H */
diff --git a/src/include/assert.h b/src/include/assert.h
index 262b8cc76181..944c67768a14 100644
--- a/src/include/assert.h
+++ b/src/include/assert.h
@@ -80,4 +80,15 @@ extern void _dead_code_assertion_failed(void) __attribute__((noreturn));
*(type *)(uintptr_t)0; \
})
+#ifdef __x86_64__
+#define pointer_to_uint32_safe(x) ({ \
+ if ((uintptr_t)(x) > 0xffffffffUL) \
+ die("Cast from pointer to uint32_t overflows"); \
+ (uint32_t)(uintptr_t)(x); \
+})
+#else
+#define pointer_to_uint32_safe(x) ({ \
+ (uint32_t)(uintptr_t)(x); \
+})
+#endif
#endif // __ASSERT_H__