Attention is currently required from: Jérémy Compostella.
Benjamin Doron has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/79227?usp=email )
Change subject: [DNM] cpu/x86: Unconditionally compile 64-bit entry code
......................................................................
[DNM] cpu/x86: Unconditionally compile 64-bit entry code
We may want to call long-mode code and return to 32-bit coreboot.
This requires that callers include entry64.inc only if they mean to switch modes, so fix the qemu-x86 CPU code that did not.
While we're here, only build the bootblock's static page tables if the bootblock is compiled as 64-bit.
DNM: Is it necessary to handle non-transitions (32-bit -> 32-bit)? If so, perhaps it's best to handle them in mode_switch.h.
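As a sketch of the caller pattern this change expects (illustrative only, not part of the patch; the qemu-x86 hunk below is the real instance), a caller that may also be built as 32-bit guards the include itself:

#if ENV_X86_64
	/* entry64.inc expects the page table address in %eax */
	movl	$(CONFIG_ARCH_X86_64_PGTBL_LOC), %eax
	#include <cpu/x86/64bit/entry64.inc>
#endif
	/* 32-bit builds skip the include and never switch modes */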
Change-Id: Idde8609239426de664907df24a016cc35d320553
Signed-off-by: Benjamin Doron <benjamin.doron@9elements.com>
---
M src/cpu/qemu-x86/cache_as_ram_bootblock.S
M src/cpu/x86/64bit/Makefile.inc
M src/cpu/x86/64bit/entry64.inc
M src/cpu/x86/Makefile.inc
4 files changed, 7 insertions(+), 7 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/27/79227/1
diff --git a/src/cpu/qemu-x86/cache_as_ram_bootblock.S b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
index 96ed468..052461b 100644
--- a/src/cpu/qemu-x86/cache_as_ram_bootblock.S
+++ b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
@@ -77,6 +77,7 @@
 	/* Align the stack and keep aligned for call to bootblock_c_entry() */
 	and	$0xfffffff0, %esp
 
+#if ENV_X86_64
 	/* Get page table address */
 	movl	$(CONFIG_ARCH_X86_64_PGTBL_LOC), %eax
 
@@ -84,7 +85,6 @@
 	#include <cpu/x86/64bit/entry64.inc>
 
 	/* Restore the BIST result and timestamps. */
-#if ENV_X86_64
 	movd	%mm2, %rdi
 	shlq	$32, %rdi
 	movd	%mm1, %rsi
diff --git a/src/cpu/x86/64bit/Makefile.inc b/src/cpu/x86/64bit/Makefile.inc
index e1cf743..611538c 100644
--- a/src/cpu/x86/64bit/Makefile.inc
+++ b/src/cpu/x86/64bit/Makefile.inc
@@ -2,6 +2,8 @@
 
 all_x86-y += mode_switch.S
 
+ifeq ($(CONFIG_ARCH_BOOTBLOCK_X86_64), y)
+
 # Add --defsym=_start=0 to suppress a linker warning.
 $(objcbfs)/pt: $(dir)/pt.S $(obj)/config.h
 	$(CC_bootblock) $(CFLAGS_bootblock) $(CPPFLAGS_bootblock) -o $@.tmp $< -Wl,--section-start=.rodata=$(CONFIG_ARCH_X86_64_PGTBL_LOC),--defsym=_start=0
@@ -13,3 +15,5 @@
 pagetables-type := raw
 pagetables-compression := none
 pagetables-COREBOOT-position := $(CONFIG_ARCH_X86_64_PGTBL_LOC)
+
+endif
diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc
index 7a01a4f..50eb8a2 100644
--- a/src/cpu/x86/64bit/entry64.inc
+++ b/src/cpu/x86/64bit/entry64.inc
@@ -9,9 +9,6 @@
  * Clobbers: eax, ecx, edx
  */
 
-#if ENV_X86_64
-	.code32
-
 #include <cpu/x86/msr.h>
 #if defined(__RAMSTAGE__)
 #include <arch/ram_segs.h>
@@ -20,6 +17,7 @@
 #endif
 
 /* Caller to provide address of page tables in eax */
+.code32
 setup_longmode:
 	/* load identity mapped page tables */
 	movl	%eax, %cr3
@@ -50,4 +48,3 @@
 
 	.code64
 __longmode_start:
-#endif
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index 9e9b837..2e084a6 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -5,8 +5,7 @@
 subdirs-y += pae
 subdirs-$(CONFIG_HAVE_SMI_HANDLER) += smm
 subdirs-$(CONFIG_UDELAY_TSC) += tsc
-# Use ARCH_BOOTBLOCK_X86_64 as a proxy for knowing if 64bit is going to be used
-subdirs-$(CONFIG_ARCH_BOOTBLOCK_X86_64) += 64bit
+subdirs-y += 64bit
 subdirs-y += cache
 
 subdirs-$(CONFIG_PARALLEL_MP) += name