Arthur Heymans has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/56095 )
Change subject: [RFC]cpu/x86/64bit: Turn jumping to long mode into a macro
......................................................................
[RFC]cpu/x86/64bit: Turn jumping to long mode into a macro
This makes the long mode switch easier to reuse, e.g. when it has to be done twice in one assembly file. Having the .code64 directive explicit at the call site instead of hidden inside the included file also makes the code flow clearer.
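For example, a call site that previously did

    #include <cpu/x86/64bit/entry64.inc>

now invokes the macro with the 64-bit target label and spells out the switch to 64-bit code itself:

    setup_longmode 1f
    .code64
    1:

Since "1f" is a GNU as local label reference, the same pattern can be repeated several times in one file.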
Change-Id: Ida861338004187e4e714be41e17c8447fa4cf935
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
---
M src/cpu/intel/car/non-evict/cache_as_ram.S
M src/cpu/intel/car/p4-netburst/cache_as_ram.S
M src/cpu/qemu-x86/cache_as_ram_bootblock.S
M src/cpu/x86/64bit/entry64.inc
M src/cpu/x86/64bit/mode_switch.S
M src/cpu/x86/lapic/secondary.S
M src/cpu/x86/sipi_vector.S
M src/cpu/x86/smm/smm_stub.S
M src/cpu/x86/smm/smmhandler.S
M src/soc/intel/common/block/cpu/car/cache_as_ram.S
10 files changed, 40 insertions(+), 22 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/95/56095/1
diff --git a/src/cpu/intel/car/non-evict/cache_as_ram.S b/src/cpu/intel/car/non-evict/cache_as_ram.S
index 0451bb4..c5e39c7 100644
--- a/src/cpu/intel/car/non-evict/cache_as_ram.S
+++ b/src/cpu/intel/car/non-evict/cache_as_ram.S
@@ -3,6 +3,7 @@
 #include <cpu/x86/mtrr.h>
 #include <cpu/x86/cache.h>
 #include <cpu/x86/post_code.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 #define NoEvictMod_MSR 0x2e0
 #define BBL_CR_CTL3_MSR 0x11e
@@ -211,9 +212,9 @@
 	andl	$0xfffffff0, %esp
 
 #if ENV_X86_64
-
-	#include <cpu/x86/64bit/entry64.inc>
-
+	setup_longmode 1f
+.code64
+1:
 	movd	%mm2, %rdi
 	shlq	$32, %rdi
 	movd	%mm1, %rsi
diff --git a/src/cpu/intel/car/p4-netburst/cache_as_ram.S b/src/cpu/intel/car/p4-netburst/cache_as_ram.S
index 9ac9e22..9e1478e 100644
--- a/src/cpu/intel/car/p4-netburst/cache_as_ram.S
+++ b/src/cpu/intel/car/p4-netburst/cache_as_ram.S
@@ -12,6 +12,7 @@
 .global bootblock_pre_c_entry
 
 #include <cpu/intel/car/cache_as_ram_symbols.inc>
+#include <cpu/x86/64bit/entry64.inc>
 
 .code32
 _cache_as_ram_setup:
@@ -360,8 +361,9 @@
 	subl	$4, %esp
 
 #if ENV_X86_64
-	#include <cpu/x86/64bit/entry64.inc>
-
+	setup_longmode 1f
+.code64
+1:
 	movd	%mm2, %rdi
 	shlq	$32, %rdi	/* BIST */
 	movd	%mm1, %rsi
diff --git a/src/cpu/qemu-x86/cache_as_ram_bootblock.S b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
index 07f848a..6fa6b28 100644
--- a/src/cpu/qemu-x86/cache_as_ram_bootblock.S
+++ b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
 #include <cpu/x86/post_code.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 #define CBFS_FILE_MAGIC 0
 #define CBFS_FILE_LEN (CBFS_FILE_MAGIC + 8)
@@ -79,11 +80,11 @@
 	/* Align the stack and keep aligned for call to bootblock_c_entry() */
 	and	$0xfffffff0, %esp
 
-	/* entry64.inc preserves ebx. */
-#include <cpu/x86/64bit/entry64.inc>
-
 	/* Restore the BIST result and timestamps. */
 #if defined(__x86_64__)
+	setup_longmode 1f
+.code64
+1:
 	movd	%mm2, %rdi
 	shlq	$32, %rdi
 	movd	%mm1, %rsi
diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc
index 7025517..d1e133c 100644
--- a/src/cpu/x86/64bit/entry64.inc
+++ b/src/cpu/x86/64bit/entry64.inc
@@ -22,8 +22,7 @@
 #include <arch/rom_segs.h>
 #endif
 
-
-setup_longmode:
+.macro setup_longmode jmp_addr
 	/* Get page table address */
 	movl	$(CONFIG_ARCH_X86_64_PGTBL_LOC), %eax
 
@@ -48,12 +47,11 @@
 
 	/* use long jump to switch to 64-bit code segment */
#if defined(__RAMSTAGE__)
-	ljmp	$RAM_CODE_SEG64, $__longmode_start
+	ljmp	$RAM_CODE_SEG64, $\jmp_addr
 #else
-	ljmp	$ROM_CODE_SEG64, $__longmode_start
+	ljmp	$ROM_CODE_SEG64, $\jmp_addr
 #endif
-.code64
-__longmode_start:
+.endm
 
 #endif
diff --git a/src/cpu/x86/64bit/mode_switch.S b/src/cpu/x86/64bit/mode_switch.S
index eea104b..d217817 100644
--- a/src/cpu/x86/64bit/mode_switch.S
+++ b/src/cpu/x86/64bit/mode_switch.S
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
+#include <cpu/x86/64bit/entry64.inc>
 
 .text
 .code64
@@ -51,7 +52,9 @@
 	movl	%eax, %ebx
 
 	/* Preserves ebx */
-	#include <cpu/x86/64bit/entry64.inc>
+	setup_longmode continue
+.code64
+continue:
 
 	/* Place return value in rax */
 	movl	%ebx, %eax
diff --git a/src/cpu/x86/lapic/secondary.S b/src/cpu/x86/lapic/secondary.S
index d36bc9a..b12d078 100644
--- a/src/cpu/x86/lapic/secondary.S
+++ b/src/cpu/x86/lapic/secondary.S
@@ -2,6 +2,7 @@
 
 #include <cpu/x86/lapic_def.h>
 #include <arch/ram_segs.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 .text
 .globl _secondary_start, _secondary_start_end, _secondary_gdt_addr
@@ -60,7 +61,9 @@
 
 #if ENV_X86_64
 	/* entry64.inc preserves ebx. */
-	#include <cpu/x86/64bit/entry64.inc>
+	setup_longmode 1f
+.code64
+1:
 	movabs	secondary_stack, %rax
 	mov	%rax, %rsp
 	andl	$0xfffffff0, %esp
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index d8156b8..ca7dcb4 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -6,6 +6,7 @@
 #include <arch/ram_segs.h>
 
 #define __RAMSTAGE__
+#include <cpu/x86/64bit/entry64.inc>
 
 /* The SIPI vector is responsible for initializing the APs in the system. It
  * loads microcode, sets up MSRs, and enables caching before calling into
@@ -216,8 +217,9 @@
 
 #ifdef __x86_64__
 	/* entry64.inc preserves ebx. */
-#include <cpu/x86/64bit/entry64.inc>
-
+	setup_longmode 1f
+.code64
+1:
 	mov	%rsi, %rdi	/* cpu_num */
 
 	movabs	c_handler, %eax
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
index 07be047..a4ac47a 100644
--- a/src/cpu/x86/smm/smm_stub.S
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -12,6 +12,7 @@
 #include <cpu/x86/cr.h>
 #include <cpu/x86/msr.h>
 #include <cpu/x86/lapic_def.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 .code32
 .section ".module_parameters", "aw", @progbits
@@ -194,7 +195,9 @@
 	movl	%edx, -0xc(%ebp)
 
 	/* entry64.inc preserves ebx, esi, edi */
-#include <cpu/x86/64bit/entry64.inc>
+	setup_longmode 1f
+.code64
+1:
 	mov	%edi, %ecx
 
 #endif
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 3750e52..9328f79 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -9,6 +9,7 @@
 
 #include <cpu/x86/lapic_def.h>
 #include <cpu/x86/msr.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 /*
  * +--------------------------------+ 0xaffff
@@ -174,8 +175,9 @@
 	movl	%edx, ia32efer_backup_edx
 
 	/* Enable long mode. Preserves ebx. */
-#include <cpu/x86/64bit/entry64.inc>
-
+	setup_longmode 1f
+.code64
+1:
 #endif
 	/* Call C handler */
 	call	smi_handler
diff --git a/src/soc/intel/common/block/cpu/car/cache_as_ram.S b/src/soc/intel/common/block/cpu/car/cache_as_ram.S
index 74957ab..3344d38 100644
--- a/src/soc/intel/common/block/cpu/car/cache_as_ram.S
+++ b/src/soc/intel/common/block/cpu/car/cache_as_ram.S
@@ -9,6 +9,7 @@
 #include <cpu/x86/post_code.h>
 #include <rules.h>
 #include <intelblocks/msr.h>
+#include <cpu/x86/64bit/entry64.inc>
 
 .section .init, "ax", @progbits
 
@@ -278,7 +279,9 @@
 	andl	$0xfffffff0, %esp
 
 #if ENV_X86_64
-	#include <cpu/x86/64bit/entry64.inc>
+	setup_longmode 1f
+.code64
+1:
 	movd	%mm2, %rdi
 	shlq	$32, %rdi
 	movd	%mm1, %rsi