Patrick Rudolph has submitted this change. ( https://review.coreboot.org/c/coreboot/+/30118 )
Change subject: arch/x86/boot: Jump to payload in protected mode
......................................................................
arch/x86/boot: Jump to payload in protected mode
* On ARCH_RAMSTAGE_X86_64 jump to the payload in protected mode.
* Add a helper function to jump to arbitrary code in protected mode,
  similar to the real mode call handler.
* Doesn't affect existing x86_32 code.
* Add a macro to cast a pointer to uint32_t that dies if it would
  overflow on conversion.
Tested on QEMU Q35 using SeaBIOS as payload. Tested on Lenovo T410 with additional x86_64 patches.
Change-Id: I6552ac30f1b6205e08e16d251328e01ce3fbfd14
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/30118
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
---
M Documentation/arch/x86/index.md
M src/arch/x86/Makefile.inc
M src/arch/x86/boot.c
A src/arch/x86/c_exit.S
M src/arch/x86/include/arch/boot/boot.h
M src/include/assert.h
6 files changed, 74 insertions(+), 1 deletion(-)
Approvals:
  build bot (Jenkins): Verified
  Arthur Heymans: Looks good to me, approved
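For orientation before the diff, here is a minimal host-side sketch of the hand-off this change introduces. It only mirrors the x86_64 path added to arch_prog_run() below; handoff_to_payload(), the stub protected_mode_jump() and main() are hypothetical stand-ins for illustration, while the real helper (implemented in c_exit.S) switches CPU modes and never returns:

/* Illustration only: host-side sketch of the hand-off added by this change.
 * The stub protected_mode_jump() stands in for the real helper in c_exit.S. */
#include <stdint.h>
#include <stdio.h>

static void protected_mode_jump(uint32_t func_ptr, uint32_t argument)
{
	/* The real helper drops from long mode to protected mode and jumps
	 * to func_ptr with the argument passed on the stack; it never returns. */
	printf("would jump to 0x%08x with argument 0x%08x\n",
	       (unsigned int)func_ptr, (unsigned int)argument);
}

/* Mirrors the x86_64 path added to arch_prog_run(): payloads are loaded
 * below 4GiB, so the entry point and its argument fit in 32 bits.  The
 * real code guards the narrowing with pointer_to_uint32_safe(). */
static void handoff_to_payload(void *entry, void *arg)
{
	const uint32_t entry32 = (uint32_t)(uintptr_t)entry;
	const uint32_t arg32 = (uint32_t)(uintptr_t)arg;

	protected_mode_jump(entry32, arg32);
}

int main(void)
{
	/* Hypothetical addresses of a payload entry point and its argument. */
	handoff_to_payload((void *)0x00f00000UL, (void *)0x00001000UL);
	return 0;
}

Run on a normal host this merely prints the 32-bit values that would be handed to the payload; in coreboot the same call ends up in the assembly stub shown in the diff.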
diff --git a/Documentation/arch/x86/index.md b/Documentation/arch/x86/index.md
index 81eb519..7b9e1fc 100644
--- a/Documentation/arch/x86/index.md
+++ b/Documentation/arch/x86/index.md
@@ -15,6 +15,8 @@
 * The high dword of pointers is always zero
 * The reference implementation is qemu
 * The CPU supports 1GiB hugepages
+* x86 payloads are loaded below 4GiB in physical memory and are jumped
+  to in *protected mode*
 
 ## Assuptions for all stages using the reference implementation
 * 0-4GiB are identity mapped using 2MiB-pages as WB
@@ -47,7 +49,7 @@
 * Add assembly code for long mode - *DONE*
 * Add assembly code for SMM - *DONE*
 * Add assembly code for postcar stage - *DONE*
-* Add assembly code to return to protected mode - *TODO*
+* Add assembly code to return to protected mode - *DONE*
 * Implement reference code for mainboard `emulation/qemu-q35` - *TODO*
 
 ## Future work
diff --git a/src/arch/x86/Makefile.inc b/src/arch/x86/Makefile.inc
index 5bba47f..a5c3309 100644
--- a/src/arch/x86/Makefile.inc
+++ b/src/arch/x86/Makefile.inc
@@ -245,6 +245,7 @@
 ramstage-y += boot.c
 ramstage-y += post.c
 ramstage-y += c_start.S
+ramstage-y += c_exit.S
 ramstage-y += cpu.c
 ramstage-y += cpu_common.c
 ramstage-y += ebda.c
diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
index db9d69e..777a0b7 100644
--- a/src/arch/x86/boot.c
+++ b/src/arch/x86/boot.c
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
+#include <arch/boot/boot.h>
 #include <commonlib/helpers.h>
 #include <console/console.h>
 #include <program_loading.h>
 #include <ip_checksum.h>
 #include <symbols.h>
+#include <assert.h>
 
 int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 {
@@ -19,6 +21,13 @@
 
 void arch_prog_run(struct prog *prog)
 {
+#if ENV_RAMSTAGE && defined(__x86_64__)
+	const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog));
+	const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
+
+	/* On x86 coreboot payloads expect to be called in protected mode */
+	protected_mode_jump(entry, arg);
+#else
 #ifdef __x86_64__
 	void (*doit)(void *arg);
 #else
@@ -27,4 +36,5 @@
 #endif
 	doit = prog_entry(prog);
 	doit(prog_entry_arg(prog));
+#endif
 }
diff --git a/src/arch/x86/c_exit.S b/src/arch/x86/c_exit.S
new file mode 100644
index 0000000..e5b9bf8
--- /dev/null
+++ b/src/arch/x86/c_exit.S
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <arch/ram_segs.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/cr.h>
+
+
+#ifdef __x86_64__
+
+	/*
+	 * Functions to handle mode switches from long mode to protected
+	 * mode.
+	 */
+.text
+.code64
+	.section ".text.protected_mode_jump", "ax", @progbits
+	.globl protected_mode_jump
+protected_mode_jump:
+
+	push	%rbp
+	mov	%rsp, %rbp
+
+	/* Arguments to stack */
+	push	%rdi
+	push	%rsi
+
+	#include <cpu/x86/64bit/exit32.inc>
+
+	movl	-8(%ebp), %eax	/* Function to call */
+	movl	-16(%ebp), %ebx	/* Argument 0 */
+
+	/* Align the stack */
+	andl	$0xFFFFFFF0, %esp
+	subl	$12, %esp
+	pushl	%ebx	/* Argument 0 */
+
+	jmp	*%eax
+#endif
diff --git a/src/arch/x86/include/arch/boot/boot.h b/src/arch/x86/include/arch/boot/boot.h
index c735915..1ef927e 100644
--- a/src/arch/x86/include/arch/boot/boot.h
+++ b/src/arch/x86/include/arch/boot/boot.h
@@ -7,4 +7,15 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_386
 
+#include <types.h>
+/*
+ * Jump to function in protected mode.
+ * @arg func_ptr Function to jump to in protected mode
+ * @arg Argument to pass to called function
+ *
+ * @noreturn
+ */
+void protected_mode_jump(uint32_t func_ptr,
+			 uint32_t argument);
+
 #endif /* ASM_I386_BOOT_H */
diff --git a/src/include/assert.h b/src/include/assert.h
index 262b8cc..944c677 100644
--- a/src/include/assert.h
+++ b/src/include/assert.h
@@ -80,4 +80,15 @@
 	*(type *)(uintptr_t)0;						\
 })
 
+#ifdef __x86_64__
+#define pointer_to_uint32_safe(x) ({ \
+	if ((uintptr_t)(x) > 0xffffffffUL) \
+		die("Cast from pointer to uint32_t overflows"); \
+	(uint32_t)(uintptr_t)(x); \
+})
+#else
+#define pointer_to_uint32_safe(x) ({ \
+	(uint32_t)(uintptr_t)(x); \
+})
+#endif
 #endif	// __ASSERT_H__
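
As a footnote on the assert.h hunk above: the checked-narrowing pattern can be exercised in isolation with the self-contained sketch below. The local die() and main() are stand-ins for illustration, and the example assumes a 64-bit GCC/Clang host (statement expressions, as coreboot itself relies on):

/* Illustration only: the checked pointer-to-uint32_t narrowing pattern from
 * the assert.h hunk above, demonstrated on a 64-bit host with GCC/Clang. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for coreboot's die(): print a message and terminate. */
static void die(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
	exit(1);
}

/* Same statement-expression pattern as the new macro: refuse to silently
 * truncate a pointer that does not fit in 32 bits. */
#define pointer_to_uint32_safe(x) ({ \
	if ((uintptr_t)(x) > 0xffffffffUL) \
		die("Cast from pointer to uint32_t overflows"); \
	(uint32_t)(uintptr_t)(x); \
})

int main(void)
{
	void *below_4gib = (void *)0x00100000UL;
	void *above_4gib = (void *)0x100000000UL;	/* crosses the 4GiB boundary */

	printf("0x%08" PRIx32 "\n", pointer_to_uint32_safe(below_4gib));
	printf("0x%08" PRIx32 "\n", pointer_to_uint32_safe(above_4gib));	/* dies */
	return 0;
}

The first printf prints 0x00100000; the second call terminates via die() because the pointer does not fit in 32 bits, which is exactly the failure mode the macro is meant to catch before a payload entry point is truncated.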