Patrick Rudolph has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/59874 )
Change subject: src/arch/x86/c_start.S: Add proper x86_64 code
......................................................................
src/arch/x86/c_start.S: Add proper x86_64 code
Don't truncate the upper address bits in assembly code and thus allow loading ramstage above 4GiB.

Tested on QEMU.
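For background on the truncation: in long mode a 32-bit immediate move can only encode the low 32 bits of an address, and writing a 32-bit register zero-extends into the upper half of the full register. A minimal sketch of the difference (illustrative only, reusing the _estack symbol from this file):

	movl	$_estack, %esp		/* imm32: breaks once _estack lives above 4GiB */
	movabs	$_estack, %rsp		/* imm64: carries the full 64-bit address */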
Change-Id: Ifc9b45f69d0b7534b2faacaad0d099cef2667478
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
---
M src/arch/x86/c_start.S
1 file changed, 35 insertions(+), 1 deletion(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/74/59874/1
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 9e718fc..c979e81 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -62,6 +62,24 @@
 	leal	_stack, %edi
 #endif
 
+#if ENV_X86_64
+	/** poison the stack. Code should not count on the
+	 * stack being full of zeros. This stack poisoning
+	 * recently uncovered a bug in the broadcast SIPI
+	 * code.
+	 */
+	movabs	$_estack, %rcx
+	sub	%rdi, %rcx
+	shr	$3, %rcx	/* it is 64 bit aligned, right? */
+	movq	$0xDEADBEEFDEADBEEF, %rax
+	rep
+	stosq
+
+	/* Set new stack with enforced alignment. */
+	movabs	$_estack, %rsp
+	movq	$(~(CONFIG_STACK_SIZE-1)), %rax
+	and	%rax, %rsp
+#else
 	/** poison the stack. Code should not count on the
 	 * stack being full of zeros. This stack poisoning
 	 * recently uncovered a bug in the broadcast SIPI
@@ -77,13 +95,23 @@
 	/* Set new stack with enforced alignment. */
 	movl	$_estack, %esp
 	andl	$(~(CONFIG_STACK_SIZE-1)), %esp
-
+#endif
 	push_cpu_info
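The 64-bit poison fill above mirrors the existing 32-bit path, widened to quadwords: %rdi (loaded earlier) points at the start of the stack region, the byte count in %rcx is shifted right by 3 to convert it to a quadword count, and rep stosq stores %rax at (%rdi) %rcx times, advancing %rdi by 8 each iteration. A standalone sketch of the idiom (hypothetical buffer and count, not the symbols above):

	lea	buf(%rip), %rdi			/* fill destination */
	mov	$16, %rcx			/* number of 8-byte words to fill */
	movq	$0xDEADBEEFDEADBEEF, %rax	/* poison pattern */
	cld					/* ascending fill */
	rep	stosq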
 
 #if CONFIG(CPU_INFO_V2)
 	/* Allocate the per_cpu_segment_data on the stack */
 	push_per_cpu_segment_data
 
+#if ENV_X86_64
+	/*
+	 * Update the BSP's per_cpu_segment_descriptor to point to the
+	 * per_cpu_segment_data that was allocated on the stack.
+	 */
+	set_segment_descriptor_base $per_cpu_segment_descriptors, %esp
+
+	movabs	per_cpu_segment_selector, %rax
+	mov	%eax, %gs
+#else
 	/*
 	 * Update the BSP's per_cpu_segment_descriptor to point to the
 	 * per_cpu_segment_data that was allocated on the stack.
@@ -93,6 +121,7 @@
 	mov	per_cpu_segment_selector, %eax
 	mov	%eax, %gs
 #endif
+#endif
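Two x86-64 details in the block above: movabs with a bare symbol and %rax is the moffs64 form, a load from a 64-bit absolute address that plain mov cannot encode, and a mov to a segment register only consumes the low 16 bits of the source, so %eax suffices for the selector. The pattern in isolation (hypothetical label, not the real symbol):

	movabs	selector_var, %rax	/* load from a 64-bit absolute address; %rax only */
	mov	%eax, %gs		/* low 16 bits become the %gs selector */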
 
 	/*
 	 * Now we are finished. Memory is up, data is copied and
@@ -101,7 +130,12 @@
 	 */
 	post_code(POST_PRE_HARDWAREMAIN)	/* post 6e */
 
+#if ENV_X86_64
+	movq	$0xFFFFFFFFFFFFFFF0, %rax
+	and	%rax, %rsp
+#else
 	andl	$0xFFFFFFF0, %esp
+#endif
 
 #if CONFIG(ASAN_IN_RAMSTAGE)
 	call asan_init
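On the final hunk: and has no 64-bit immediate form, which is why the mask takes a detour through %rax. Since 0xFFFFFFFFFFFFFFF0 is simply -16, a sign-extended 32-bit immediate would also do the job; a one-instruction alternative sketch (equivalent effect, not what this change uses):

	and	$-16, %rsp	/* imm32 sign-extends to 0xFFFFFFFFFFFFFFF0 */

Either form leaves %rsp 16-byte aligned before the following calls, matching the SysV AMD64 ABI's stack alignment expectation.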