Patrick Rudolph has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
arch/x86: Use ENV_X86_64 instead of __x86_64__
Untested.
Change-Id: I152483d24af0512c0ee4fbbe8931b7312e487ac6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
---
M src/arch/x86/assembly_entry.S
M src/arch/x86/boot.c
M src/arch/x86/c_exit.S
M src/arch/x86/c_start.S
M src/arch/x86/cpu.c
M src/arch/x86/cpu_common.c
M src/arch/x86/exception.c
M src/arch/x86/exit_car.S
M src/arch/x86/gdt_init.S
M src/arch/x86/idt.S
M src/arch/x86/include/arch/cpu.h
M src/arch/x86/include/arch/registers.h
M src/arch/x86/memcpy.c
M src/arch/x86/wakeup.S
M src/cpu/qemu-x86/cache_as_ram_bootblock.S
M src/cpu/x86/64bit/entry64.inc
M src/cpu/x86/lapic/lapic_cpu_init.c
M src/cpu/x86/smm/smm_stub.S
M src/cpu/x86/smm/smmhandler.S
M src/include/assert.h
M src/include/cpu/x86/cr.h
M src/soc/intel/denverton_ns/soc_util.c
22 files changed, 42 insertions(+), 42 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/67/44867/1
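The rename is mechanical, but the motivation is worth spelling out before the diff: __x86_64__ is defined by the compiler only when building 64-bit code, so it can only be tested with #ifdef/#ifndef, and a misspelled name is silently treated as "not defined" — the __ARCH_x86_64__ check this change removes from arch/registers.h is exactly that failure mode. The ENV_* macros, by contrast, are coreboot build macros that are always defined to either 0 or 1, so they work with plain #if and an unknown name can be caught (for example by -Wundef). The sketch below is illustrative only; the real definitions live in coreboot's build headers and may differ in detail.

```c
/*
 * Minimal sketch of the ENV_* convention this change relies on.
 * These are NOT the actual coreboot definitions (those live in the
 * build headers); they only model "always defined, either 0 or 1".
 */
#if defined(__x86_64__)        /* set by the compiler for 64-bit targets only */
#define ENV_X86_64 1
#define ENV_X86_32 0
#else
#define ENV_X86_64 0
#define ENV_X86_32 1
#endif

/* Old style: must use #ifdef, and a typo in the macro name silently
 * selects the 32-bit branch instead of failing the build. */
#ifdef __x86_64__
#define REG_WIDTH_OLD 8
#else
#define REG_WIDTH_OLD 4
#endif

/* New style: plain #if works because the macro is always 0 or 1, and
 * a misspelled ENV_* name can be reported by -Wundef instead of
 * silently evaluating to 0. */
#if ENV_X86_64
#define REG_WIDTH_NEW 8
#else
#define REG_WIDTH_NEW 4
#endif
```

Note also that the change maps #ifndef __x86_64__ to #if ENV_X86_32 rather than #if !ENV_X86_64; the two are equivalent as long as exactly one of the pair is 1.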
diff --git a/src/arch/x86/assembly_entry.S b/src/arch/x86/assembly_entry.S index 31670c2..6e73027 100644 --- a/src/arch/x86/assembly_entry.S +++ b/src/arch/x86/assembly_entry.S @@ -15,7 +15,7 @@ #define _STACK_TOP _ecar_stack #endif
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -26,7 +26,7 @@ _start:
/* Migrate GDT to this text segment */ -#ifdef __x86_64__ +#if ENV_X86_64 call gdt_init64 #else call gdt_init diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c index 777a0b7..31e7cee 100644 --- a/src/arch/x86/boot.c +++ b/src/arch/x86/boot.c @@ -21,14 +21,14 @@
void arch_prog_run(struct prog *prog) { -#if ENV_RAMSTAGE && defined(__x86_64__) +#if ENV_RAMSTAGE && ENV_X86_64 const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog)); const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
/* On x86 coreboot payloads expect to be called in protected mode */ protected_mode_jump(entry, arg); #else -#ifdef __x86_64__ +#if ENV_X86_64 void (*doit)(void *arg); #else /* Ensure the argument is pushed on the stack. */ diff --git a/src/arch/x86/c_exit.S b/src/arch/x86/c_exit.S index e5b9bf8..bb1df28 100644 --- a/src/arch/x86/c_exit.S +++ b/src/arch/x86/c_exit.S @@ -5,7 +5,7 @@ #include <cpu/x86/cr.h>
-#ifdef __x86_64__ +#if ENV_X86_64
/* * Functions to handle mode switches from long mode to protected diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S index c46a45c..47d6597 100644 --- a/src/arch/x86/c_start.S +++ b/src/arch/x86/c_start.S @@ -22,7 +22,7 @@ #endif
.section ".text._start", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -31,7 +31,7 @@ _start: cli lgdt %cs:gdtaddr -#ifndef __x86_64__ +#if ENV_X86_32 ljmp $RAM_CODE_SEG, $1f #endif 1: movl $RAM_DATA_SEG, %eax @@ -40,7 +40,7 @@ movl %eax, %ss movl %eax, %fs movl %eax, %gs -#ifdef __x86_64__ +#if ENV_X86_64 mov $RAM_CODE_SEG64, %ecx call SetCodeSelector #endif @@ -49,7 +49,7 @@
cld
-#ifdef __x86_64__ +#if ENV_X86_64 mov %rdi, _cbmem_top_ptr #else /* The return argument is at 0(%esp), the calling argument at 4(%esp) */ @@ -110,7 +110,7 @@
.globl gdb_stub_breakpoint gdb_stub_breakpoint: -#ifdef __x86_64__ +#if ENV_X86_64 pop %rax /* Return address */ pushfl push %cs @@ -132,7 +132,7 @@
gdtaddr: .word gdt_end - gdt - 1 -#ifdef __x86_64__ +#if ENV_X86_64 .quad gdt #else .long gdt /* we know the offset */ @@ -169,7 +169,7 @@
/* selgdt 0x18, flat data segment */ .word 0xffff, 0x0000 -#ifdef __x86_64__ +#if ENV_X86_64 .byte 0x00, 0x92, 0xcf, 0x00 #else .byte 0x00, 0x93, 0xcf, 0x00 @@ -203,7 +203,7 @@ * limit */
-#ifdef __x86_64__ +#if ENV_X86_64 /* selgdt 0x48, flat x64 code segment */ .word 0xffff, 0x0000 .byte 0x00, 0x9b, 0xaf, 0x00 @@ -211,7 +211,7 @@ gdt_end:
.section ".text._start", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 SetCodeSelector: # save rsp because iret will align it to a 16 byte boundary mov %rsp, %rdx diff --git a/src/arch/x86/cpu.c b/src/arch/x86/cpu.c index d054cfe..d44bd2f 100644 --- a/src/arch/x86/cpu.c +++ b/src/arch/x86/cpu.c @@ -14,7 +14,7 @@ #include <device/device.h> #include <smp/spinlock.h>
-#ifndef __x86_64__ +#if ENV_X86_32 /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(uint32_t flag) { @@ -137,7 +137,7 @@
vendor_name[0] = '\0'; /* Unset */
-#ifndef __x86_64__ +#if ENV_X86_32 /* Find the id and vendor_name */ if (!cpu_have_cpuid()) { /* Its a 486 if we can modify the AC flag */ diff --git a/src/arch/x86/cpu_common.c b/src/arch/x86/cpu_common.c index 07de155..77d08c41 100644 --- a/src/arch/x86/cpu_common.c +++ b/src/arch/x86/cpu_common.c @@ -2,7 +2,7 @@
#include <cpu/cpu.h>
-#ifndef __x86_64__ +#if ENV_X86_32 /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(uint32_t flag) { diff --git a/src/arch/x86/exception.c b/src/arch/x86/exception.c index f10c7bf..6c519c3 100644 --- a/src/arch/x86/exception.c +++ b/src/arch/x86/exception.c @@ -491,7 +491,7 @@ logical_processor = cpu_index(); #endif u8 *code; -#ifdef __x86_64__ +#if ENV_X86_64 #define MDUMP_SIZE 0x100 printk(BIOS_EMERG, "CPU Index %d - APIC %d Unexpected Exception:\n" diff --git a/src/arch/x86/exit_car.S b/src/arch/x86/exit_car.S index dc356b2..db36821 100644 --- a/src/arch/x86/exit_car.S +++ b/src/arch/x86/exit_car.S @@ -11,7 +11,7 @@ .long 0 .long 0
-#if defined(__x86_64__) +#if ENV_X86_64 .code64 .macro pop_eax_edx pop %rax @@ -42,13 +42,13 @@ is expected to be implemented in assembly. */
/* Migrate GDT to this text segment */ -#if defined(__x86_64__) +#if ENV_X86_64 call gdt_init64 #else call gdt_init #endif
-#ifdef __x86_64__ +#if ENV_X86_64 mov %rdi, _cbmem_top_ptr #else /* The return argument is at 0(%esp), the calling argument at 4(%esp) */ @@ -67,7 +67,7 @@ call chipset_teardown_car
/* Enable caching if not already enabled. */ -#ifdef __x86_64__ +#if ENV_X86_64 mov %cr0, %rax and $(~(CR0_CD | CR0_NW)), %eax mov %rax, %cr0 @@ -102,7 +102,7 @@ /* Need to align stack to 16 bytes at the call instruction. Therefore account for the 1 push. */ andl $0xfffffff0, %esp -#if defined(__x86_64__) +#if ENV_X86_64 mov %rbp, %rdi #else sub $12, %esp diff --git a/src/arch/x86/gdt_init.S b/src/arch/x86/gdt_init.S index 1558ac6..287f17b 100644 --- a/src/arch/x86/gdt_init.S +++ b/src/arch/x86/gdt_init.S @@ -15,7 +15,7 @@ .word gdt_end - gdt -1 /* compute the table limit */ .long gdt /* we know the offset */
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 .section ".text._gdt64_", "ax", @progbits .globl gdt_init64 diff --git a/src/arch/x86/idt.S b/src/arch/x86/idt.S index 6807056..d763b9e 100644 --- a/src/arch/x86/idt.S +++ b/src/arch/x86/idt.S @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */
.section ".text._idt", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -109,7 +109,7 @@
.global int_hand int_hand: -#ifdef __x86_64__ +#if ENV_X86_64 /* At this point, on x86-64, on the stack there is: * 0(%rsp) vector * 8(%rsp) error code diff --git a/src/arch/x86/include/arch/cpu.h b/src/arch/x86/include/arch/cpu.h index b622465..e523fc0 100644 --- a/src/arch/x86/include/arch/cpu.h +++ b/src/arch/x86/include/arch/cpu.h @@ -236,7 +236,7 @@ { struct cpu_info *ci; __asm__( -#ifdef __x86_64__ +#if ENV_X86_64 "and %%rsp,%0; " "or %2, %0 " #else diff --git a/src/arch/x86/include/arch/registers.h b/src/arch/x86/include/arch/registers.h index 5f8f9be..7043cd1 100644 --- a/src/arch/x86/include/arch/registers.h +++ b/src/arch/x86/include/arch/registers.h @@ -42,7 +42,7 @@ uint64_t r##A; \ } __packed
-#ifdef __ARCH_x86_64__ +#if ENV_X86_64 struct eregs { QUAD_DOWNTO8(a); QUAD_DOWNTO8(c); diff --git a/src/arch/x86/memcpy.c b/src/arch/x86/memcpy.c index 1cfdf89..93002cd 100644 --- a/src/arch/x86/memcpy.c +++ b/src/arch/x86/memcpy.c @@ -15,7 +15,7 @@ #endif
asm volatile( -#ifdef __x86_64__ +#if ENV_X86_64 "rep ; movsd\n\t" "mov %4,%%rcx\n\t" #else diff --git a/src/arch/x86/wakeup.S b/src/arch/x86/wakeup.S index ae2efe0..dc9510b 100644 --- a/src/arch/x86/wakeup.S +++ b/src/arch/x86/wakeup.S @@ -6,7 +6,7 @@ /* CR0 bits */ #define PE (1 << 0)
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -14,7 +14,7 @@
.globl __wakeup __wakeup: -#ifdef __x86_64__ +#if ENV_X86_64 xor %rax,%rax mov %ss, %ax push %rax diff --git a/src/cpu/qemu-x86/cache_as_ram_bootblock.S b/src/cpu/qemu-x86/cache_as_ram_bootblock.S index 415ed24..4489d2c 100644 --- a/src/cpu/qemu-x86/cache_as_ram_bootblock.S +++ b/src/cpu/qemu-x86/cache_as_ram_bootblock.S @@ -28,7 +28,7 @@ and $0xfffffff0, %esp
/* Restore the BIST result and timestamps. */ -#if defined(__x86_64__) +#if ENV_X86_64 movd %mm2, %rdi shld %rdi, 32 movd %mm1, %rsi diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc index 65c0fdc..5049169 100644 --- a/src/cpu/x86/64bit/entry64.inc +++ b/src/cpu/x86/64bit/entry64.inc @@ -9,7 +9,7 @@ * Clobbers: eax, ecx, edx */
-#if defined(__x86_64__) +#if ENV_X86_64 .code32 #if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0 #error pagetables must be 4KiB aligned! diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c index f89d9e3..342c86d 100644 --- a/src/cpu/x86/lapic/lapic_cpu_init.c +++ b/src/cpu/x86/lapic/lapic_cpu_init.c @@ -39,7 +39,7 @@ static inline void setup_secondary_gdt(void) { u16 *gdt_limit; -#ifdef __x86_64__ +#if ENV_X86_64 u64 *gdt_base; #else u32 *gdt_base; diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S index 11ea9a7..dd902d7 100644 --- a/src/cpu/x86/smm/smm_stub.S +++ b/src/cpu/x86/smm/smm_stub.S @@ -178,7 +178,7 @@ /* Align stack to 16 bytes. Another 32 bytes are pushed below. */ andl $0xfffffff0, %esp
-#ifdef __x86_64__ +#if ENV_X86_64 mov %ecx, %edi /* Backup IA32_EFER. Preserves ebx. */ movl $(IA32_EFER), %ecx @@ -197,7 +197,7 @@ * struct arg = { c_handler_params, cpu_num, smm_runtime, canary }; * c_handler(&arg) */ -#ifdef __x86_64__ +#if ENV_X86_64 push %rbx /* uintptr_t *canary */ push $(smm_runtime) push %rcx /* size_t cpu */ diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S index 340840f..bb91cb1 100644 --- a/src/cpu/x86/smm/smmhandler.S +++ b/src/cpu/x86/smm/smmhandler.S @@ -43,7 +43,7 @@
#define SMM_HANDLER_OFFSET 0x0000
-#if defined(__x86_64__) +#if ENV_X86_64 .bss ia32efer_backup_eax: .long @@ -169,7 +169,7 @@ movl $0xa8000 + 0x7efc, %ebx /* core 0 address */ subl %ebp, %ebx /* subtract core X offset */
-#if defined(__x86_64__) +#if ENV_X86_64 /* Backup IA32_EFER. Preserves ebx. */ movl $(IA32_EFER), %ecx rdmsr @@ -189,7 +189,7 @@ /* Call C handler */ call smi_handler
-#if defined(__x86_64__) +#if ENV_X86_64 /* * The only reason to go back to protected mode is that RSM doesn't restore * MSR registers and MSR IA32_EFER was modified by entering long mode. diff --git a/src/include/assert.h b/src/include/assert.h index 944c677..27d7309 100644 --- a/src/include/assert.h +++ b/src/include/assert.h @@ -80,7 +80,7 @@ *(type *)(uintptr_t)0; \ })
-#ifdef __x86_64__ +#if ENV_X86_64 #define pointer_to_uint32_safe(x) ({ \ if ((uintptr_t)(x) > 0xffffffffUL) \ die("Cast from pointer to uint32_t overflows"); \ diff --git a/src/include/cpu/x86/cr.h b/src/include/cpu/x86/cr.h index 3508505a..c70f35c 100644 --- a/src/include/cpu/x86/cr.h +++ b/src/include/cpu/x86/cr.h @@ -9,7 +9,7 @@
#define COMPILER_BARRIER "memory"
-#ifdef __x86_64__ +#if ENV_X86_64 #define CRx_TYPE uint64_t #define CRx_IN "q" #define CRx_RET "=q" diff --git a/src/soc/intel/denverton_ns/soc_util.c b/src/soc/intel/denverton_ns/soc_util.c index d5b9b34..b4c707d 100644 --- a/src/soc/intel/denverton_ns/soc_util.c +++ b/src/soc/intel/denverton_ns/soc_util.c @@ -274,7 +274,7 @@ unsigned long d0, d1, d2;
asm volatile( -#ifdef __x86_64__ +#if ENV_X86_64 "rep ; movsd\n\t" "mov %4,%%rcx\n\t" #else
HAOUAS Elyes has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
Patch Set 1: Code-Review+1
Arthur Heymans has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
Patch Set 1: Code-Review+2
The BUILD_TIMELESS=1 results for both 32-bit and 64-bit builds on Q35 remain the same.
Stefan Reinauer has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
Patch Set 1: Code-Review+2
Attention is currently required from: Patrick Rudolph.
Kyösti Mälkki has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
Patch Set 1:
(1 comment)
Patchset:
PS1: Can you rebase this?
Attention is currently required from: Mariusz Szafrański, Suresh Bellampalli, Michal Motyl.
Hello build bot (Jenkins), David Guckian, Patrick Georgi, Martin Roth, Stefan Reinauer, Vanessa Eusebio, Arthur Heymans, Patrick Rudolph,
I'd like you to reexamine a change. Please visit
https://review.coreboot.org/c/coreboot/+/44867
to look at the new patch set (#2).
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
arch/x86: Use ENV_X86_64 instead of __x86_64__
Tested on Intel Sandybridge x86_64 and x86_32.
Change-Id: I152483d24af0512c0ee4fbbe8931b7312e487ac6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
---
M src/arch/x86/assembly_entry.S
M src/arch/x86/boot.c
M src/arch/x86/c_exit.S
M src/arch/x86/c_start.S
M src/arch/x86/cpu.c
M src/arch/x86/cpu_common.c
M src/arch/x86/exception.c
M src/arch/x86/exit_car.S
M src/arch/x86/gdt_init.S
M src/arch/x86/idt.S
M src/arch/x86/include/arch/cpu.h
M src/arch/x86/include/arch/registers.h
M src/arch/x86/memcpy.c
M src/arch/x86/wakeup.S
M src/cpu/qemu-x86/cache_as_ram_bootblock.S
M src/cpu/x86/64bit/entry64.inc
M src/cpu/x86/lapic/lapic_cpu_init.c
M src/cpu/x86/sipi_vector.S
M src/cpu/x86/smm/smm_stub.S
M src/cpu/x86/smm/smmhandler.S
M src/include/assert.h
M src/include/cpu/x86/cr.h
M src/soc/intel/denverton_ns/soc_util.c
23 files changed, 44 insertions(+), 44 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/67/44867/2
Attention is currently required from: Patrick Rudolph, Mariusz Szafrański, Suresh Bellampalli, Michal Motyl.
Angel Pons has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
Patch Set 2: Code-Review+2
(1 comment)
File src/soc/intel/denverton_ns/soc_util.c:
https://review.coreboot.org/c/coreboot/+/44867/comment/6f311bcb_3e0a7378
PS2, Line 243: void *memcpy_s(void *dest, const void *src, size_t n)
it's dead code.
Patrick Georgi has submitted this change. ( https://review.coreboot.org/c/coreboot/+/44867 )
Change subject: arch/x86: Use ENV_X86_64 instead of __x86_64__
......................................................................
arch/x86: Use ENV_X86_64 instead of __x86_64__
Tested on Intel Sandybridge x86_64 and x86_32.
Change-Id: I152483d24af0512c0ee4fbbe8931b7312e487ac6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/44867
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
---
M src/arch/x86/assembly_entry.S
M src/arch/x86/boot.c
M src/arch/x86/c_exit.S
M src/arch/x86/c_start.S
M src/arch/x86/cpu.c
M src/arch/x86/cpu_common.c
M src/arch/x86/exception.c
M src/arch/x86/exit_car.S
M src/arch/x86/gdt_init.S
M src/arch/x86/idt.S
M src/arch/x86/include/arch/cpu.h
M src/arch/x86/include/arch/registers.h
M src/arch/x86/memcpy.c
M src/arch/x86/wakeup.S
M src/cpu/qemu-x86/cache_as_ram_bootblock.S
M src/cpu/x86/64bit/entry64.inc
M src/cpu/x86/lapic/lapic_cpu_init.c
M src/cpu/x86/sipi_vector.S
M src/cpu/x86/smm/smm_stub.S
M src/cpu/x86/smm/smmhandler.S
M src/include/assert.h
M src/include/cpu/x86/cr.h
M src/soc/intel/denverton_ns/soc_util.c
23 files changed, 44 insertions(+), 44 deletions(-)
Approvals:
  build bot (Jenkins): Verified
  Angel Pons: Looks good to me, approved
diff --git a/src/arch/x86/assembly_entry.S b/src/arch/x86/assembly_entry.S index 31670c2..6e73027 100644 --- a/src/arch/x86/assembly_entry.S +++ b/src/arch/x86/assembly_entry.S @@ -15,7 +15,7 @@ #define _STACK_TOP _ecar_stack #endif
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -26,7 +26,7 @@ _start:
/* Migrate GDT to this text segment */ -#ifdef __x86_64__ +#if ENV_X86_64 call gdt_init64 #else call gdt_init diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c index 777a0b7..31e7cee 100644 --- a/src/arch/x86/boot.c +++ b/src/arch/x86/boot.c @@ -21,14 +21,14 @@
void arch_prog_run(struct prog *prog) { -#if ENV_RAMSTAGE && defined(__x86_64__) +#if ENV_RAMSTAGE && ENV_X86_64 const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog)); const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
/* On x86 coreboot payloads expect to be called in protected mode */ protected_mode_jump(entry, arg); #else -#ifdef __x86_64__ +#if ENV_X86_64 void (*doit)(void *arg); #else /* Ensure the argument is pushed on the stack. */ diff --git a/src/arch/x86/c_exit.S b/src/arch/x86/c_exit.S index e5b9bf8..bb1df28 100644 --- a/src/arch/x86/c_exit.S +++ b/src/arch/x86/c_exit.S @@ -5,7 +5,7 @@ #include <cpu/x86/cr.h>
-#ifdef __x86_64__ +#if ENV_X86_64
/* * Functions to handle mode switches from long mode to protected diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S index b7ffddc..3ef03b3 100644 --- a/src/arch/x86/c_start.S +++ b/src/arch/x86/c_start.S @@ -24,7 +24,7 @@ #endif
.section ".text._start", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -32,7 +32,7 @@ .globl _start _start: cli -#ifdef __x86_64__ +#if ENV_X86_64 movabs $gdtaddr, %rax lgdt (%rax) #else @@ -45,7 +45,7 @@ movl %eax, %ss movl %eax, %fs movl %eax, %gs -#ifdef __x86_64__ +#if ENV_X86_64 mov $RAM_CODE_SEG64, %ecx call SetCodeSelector #endif @@ -54,7 +54,7 @@
cld
-#ifdef __x86_64__ +#if ENV_X86_64 mov %rdi, %rax movabs %rax, _cbmem_top_ptr movabs $_stack, %rdi @@ -117,7 +117,7 @@
.globl gdb_stub_breakpoint gdb_stub_breakpoint: -#ifdef __x86_64__ +#if ENV_X86_64 pop %rax /* Return address */ pushfl push %cs @@ -139,7 +139,7 @@
gdtaddr: .word gdt_end - gdt - 1 -#ifdef __x86_64__ +#if ENV_X86_64 .quad gdt #else .long gdt /* we know the offset */ @@ -176,7 +176,7 @@
/* selgdt 0x18, flat data segment */ .word 0xffff, 0x0000 -#ifdef __x86_64__ +#if ENV_X86_64 .byte 0x00, 0x92, 0xcf, 0x00 #else .byte 0x00, 0x93, 0xcf, 0x00 @@ -210,7 +210,7 @@ * limit */
-#ifdef __x86_64__ +#if ENV_X86_64 /* selgdt 0x48, flat x64 code segment */ .word 0xffff, 0x0000 .byte 0x00, 0x9b, 0xaf, 0x00 @@ -218,7 +218,7 @@ gdt_end:
.section ".text._start", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 SetCodeSelector: # save rsp because iret will align it to a 16 byte boundary mov %rsp, %rdx diff --git a/src/arch/x86/cpu.c b/src/arch/x86/cpu.c index c929e5e..f4cb83a 100644 --- a/src/arch/x86/cpu.c +++ b/src/arch/x86/cpu.c @@ -13,7 +13,7 @@ #include <device/device.h> #include <smp/spinlock.h>
-#ifndef __x86_64__ +#if ENV_X86_32 /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(uint32_t flag) { @@ -136,7 +136,7 @@
vendor_name[0] = '\0'; /* Unset */
-#ifndef __x86_64__ +#if ENV_X86_32 /* Find the id and vendor_name */ if (!cpu_have_cpuid()) { /* Its a 486 if we can modify the AC flag */ diff --git a/src/arch/x86/cpu_common.c b/src/arch/x86/cpu_common.c index 07de155..77d08c41 100644 --- a/src/arch/x86/cpu_common.c +++ b/src/arch/x86/cpu_common.c @@ -2,7 +2,7 @@
#include <cpu/cpu.h>
-#ifndef __x86_64__ +#if ENV_X86_32 /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(uint32_t flag) { diff --git a/src/arch/x86/exception.c b/src/arch/x86/exception.c index aa94d3f..624226e3 100644 --- a/src/arch/x86/exception.c +++ b/src/arch/x86/exception.c @@ -492,7 +492,7 @@ logical_processor = cpu_index(); #endif u8 *code; -#ifdef __x86_64__ +#if ENV_X86_64 #define MDUMP_SIZE 0x100 printk(BIOS_EMERG, "CPU Index %d - APIC %d Unexpected Exception:\n" diff --git a/src/arch/x86/exit_car.S b/src/arch/x86/exit_car.S index d1b1a53..527a3cb 100644 --- a/src/arch/x86/exit_car.S +++ b/src/arch/x86/exit_car.S @@ -11,7 +11,7 @@ .long 0 .long 0
-#if defined(__x86_64__) +#if ENV_X86_64 .code64 .macro pop_eax_edx pop %rax @@ -42,13 +42,13 @@ is expected to be implemented in assembly. */
/* Migrate GDT to this text segment */ -#if defined(__x86_64__) +#if ENV_X86_64 call gdt_init64 #else call gdt_init #endif
-#ifdef __x86_64__ +#if ENV_X86_64 mov %rdi, %rax movabs %rax, _cbmem_top_ptr #else @@ -61,7 +61,7 @@ cpuid btl $CPUID_FEATURE_CLFLUSH_BIT, %edx jnc skip_clflush -#ifdef __x86_64__ +#if ENV_X86_64 movabs _cbmem_top_ptr, %rax clflush (%rax) #else @@ -73,7 +73,7 @@ call chipset_teardown_car
/* Enable caching if not already enabled. */ -#ifdef __x86_64__ +#if ENV_X86_64 mov %cr0, %rax and $(~(CR0_CD | CR0_NW)), %eax mov %rax, %cr0 @@ -115,7 +115,7 @@ /* Need to align stack to 16 bytes at the call instruction. Therefore account for the 1 push. */ andl $0xfffffff0, %esp -#if defined(__x86_64__) +#if ENV_X86_64 mov %rbp, %rdi #else sub $12, %esp diff --git a/src/arch/x86/gdt_init.S b/src/arch/x86/gdt_init.S index 30b3965..825c23c 100644 --- a/src/arch/x86/gdt_init.S +++ b/src/arch/x86/gdt_init.S @@ -18,7 +18,7 @@ .word gdt_end - gdt -1 /* compute the table limit */ .long gdt /* we know the offset */
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 .section .init._gdt64_, "ax", @progbits .globl gdt_init64 diff --git a/src/arch/x86/idt.S b/src/arch/x86/idt.S index 6807056..d763b9e 100644 --- a/src/arch/x86/idt.S +++ b/src/arch/x86/idt.S @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */
.section ".text._idt", "ax", @progbits -#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -109,7 +109,7 @@
.global int_hand int_hand: -#ifdef __x86_64__ +#if ENV_X86_64 /* At this point, on x86-64, on the stack there is: * 0(%rsp) vector * 8(%rsp) error code diff --git a/src/arch/x86/include/arch/cpu.h b/src/arch/x86/include/arch/cpu.h index 889628d..3fd4c1b 100644 --- a/src/arch/x86/include/arch/cpu.h +++ b/src/arch/x86/include/arch/cpu.h @@ -235,7 +235,7 @@ { struct cpu_info *ci; __asm__( -#ifdef __x86_64__ +#if ENV_X86_64 "and %%rsp,%0; " "or %2, %0 " #else diff --git a/src/arch/x86/include/arch/registers.h b/src/arch/x86/include/arch/registers.h index 5f8f9be..7043cd1 100644 --- a/src/arch/x86/include/arch/registers.h +++ b/src/arch/x86/include/arch/registers.h @@ -42,7 +42,7 @@ uint64_t r##A; \ } __packed
-#ifdef __ARCH_x86_64__ +#if ENV_X86_64 struct eregs { QUAD_DOWNTO8(a); QUAD_DOWNTO8(c); diff --git a/src/arch/x86/memcpy.c b/src/arch/x86/memcpy.c index 1cfdf89..93002cd 100644 --- a/src/arch/x86/memcpy.c +++ b/src/arch/x86/memcpy.c @@ -15,7 +15,7 @@ #endif
asm volatile( -#ifdef __x86_64__ +#if ENV_X86_64 "rep ; movsd\n\t" "mov %4,%%rcx\n\t" #else diff --git a/src/arch/x86/wakeup.S b/src/arch/x86/wakeup.S index ae2efe0..dc9510b 100644 --- a/src/arch/x86/wakeup.S +++ b/src/arch/x86/wakeup.S @@ -6,7 +6,7 @@ /* CR0 bits */ #define PE (1 << 0)
-#ifdef __x86_64__ +#if ENV_X86_64 .code64 #else .code32 @@ -14,7 +14,7 @@
.globl __wakeup __wakeup: -#ifdef __x86_64__ +#if ENV_X86_64 xor %rax,%rax mov %ss, %ax push %rax diff --git a/src/cpu/qemu-x86/cache_as_ram_bootblock.S b/src/cpu/qemu-x86/cache_as_ram_bootblock.S index 07f848a..f828d6f 100644 --- a/src/cpu/qemu-x86/cache_as_ram_bootblock.S +++ b/src/cpu/qemu-x86/cache_as_ram_bootblock.S @@ -83,7 +83,7 @@ #include <cpu/x86/64bit/entry64.inc>
/* Restore the BIST result and timestamps. */ -#if defined(__x86_64__) +#if ENV_X86_64 movd %mm2, %rdi shlq $32, %rdi movd %mm1, %rsi diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc index 7025517..7da68b4 100644 --- a/src/cpu/x86/64bit/entry64.inc +++ b/src/cpu/x86/64bit/entry64.inc @@ -9,7 +9,7 @@ * Clobbers: eax, ecx, edx */
-#if defined(__x86_64__) +#if ENV_X86_64 .code32 #if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0 #error pagetables must be 4KiB aligned! diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c index 295f583..e141ad8 100644 --- a/src/cpu/x86/lapic/lapic_cpu_init.c +++ b/src/cpu/x86/lapic/lapic_cpu_init.c @@ -38,7 +38,7 @@ static inline void setup_secondary_gdt(void) { u16 *gdt_limit; -#ifdef __x86_64__ +#if ENV_X86_64 u64 *gdt_base; #else u32 *gdt_base; diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S index d8156b8..aa95461 100644 --- a/src/cpu/x86/sipi_vector.S +++ b/src/cpu/x86/sipi_vector.S @@ -214,7 +214,7 @@ mov %eax, %cr4 #endif
-#ifdef __x86_64__ +#if ENV_X86_64 /* entry64.inc preserves ebx. */ #include <cpu/x86/64bit/entry64.inc>
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S index 07be047..44ee7cb 100644 --- a/src/cpu/x86/smm/smm_stub.S +++ b/src/cpu/x86/smm/smm_stub.S @@ -185,7 +185,7 @@ /* Align stack to 16 bytes. Another 32 bytes are pushed below. */ andl $0xfffffff0, %esp
-#ifdef __x86_64__ +#if ENV_X86_64 mov %ecx, %edi /* Backup IA32_EFER. Preserves ebx. */ movl $(IA32_EFER), %ecx @@ -204,7 +204,7 @@ * struct arg = { c_handler_params, cpu_num, smm_runtime, canary }; * c_handler(&arg) */ -#ifdef __x86_64__ +#if ENV_X86_64 push %rbx /* uintptr_t *canary */ push %rcx /* size_t cpu */
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S index 3750e52..b7805d0 100644 --- a/src/cpu/x86/smm/smmhandler.S +++ b/src/cpu/x86/smm/smmhandler.S @@ -43,7 +43,7 @@
#define SMM_HANDLER_OFFSET 0x0000
-#if defined(__x86_64__) +#if ENV_X86_64 .bss ia32efer_backup_eax: .long 0 @@ -166,7 +166,7 @@ addl $SMM_STACK_SIZE, %ebx movl %ebx, %esp
-#if defined(__x86_64__) +#if ENV_X86_64 /* Backup IA32_EFER. Preserves ebx. */ movl $(IA32_EFER), %ecx rdmsr @@ -180,7 +180,7 @@ /* Call C handler */ call smi_handler
-#if defined(__x86_64__) +#if ENV_X86_64 /* * The only reason to go back to protected mode is that RSM doesn't restore * MSR registers and MSR IA32_EFER was modified by entering long mode. diff --git a/src/include/assert.h b/src/include/assert.h index 829e732..93d6bfc 100644 --- a/src/include/assert.h +++ b/src/include/assert.h @@ -94,7 +94,7 @@ *(type *)(uintptr_t)0; \ })
-#ifdef __x86_64__ +#if ENV_X86_64 #define pointer_to_uint32_safe(x) ({ \ if ((uintptr_t)(x) > 0xffffffffUL) \ die("Cast from pointer to uint32_t overflows"); \ diff --git a/src/include/cpu/x86/cr.h b/src/include/cpu/x86/cr.h index 3508505a..c70f35c 100644 --- a/src/include/cpu/x86/cr.h +++ b/src/include/cpu/x86/cr.h @@ -9,7 +9,7 @@
#define COMPILER_BARRIER "memory"
-#ifdef __x86_64__ +#if ENV_X86_64 #define CRx_TYPE uint64_t #define CRx_IN "q" #define CRx_RET "=q" diff --git a/src/soc/intel/denverton_ns/soc_util.c b/src/soc/intel/denverton_ns/soc_util.c index d5b9b34..b4c707d 100644 --- a/src/soc/intel/denverton_ns/soc_util.c +++ b/src/soc/intel/denverton_ns/soc_util.c @@ -274,7 +274,7 @@ unsigned long d0, d1, d2;
asm volatile( -#ifdef __x86_64__ +#if ENV_X86_64 "rep ; movsd\n\t" "mov %4,%%rcx\n\t" #else
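One detail that makes this work in the assembly files touched above (assembly_entry.S, c_start.S, wakeup.S, the SMM stub and handler, and so on): .S sources are run through the C preprocessor before they reach the assembler, so the same #if ENV_X86_64 test that gates C code also selects between 32-bit and 64-bit code emission. A minimal sketch, assuming the ENV_* convention outlined earlier; the .code64/.code32 directives come from this change's hunks, while the label and instructions are hypothetical:

```c
/* Sketch only: because .S files pass through cpp, #if ENV_X86_64 is
 * resolved before the assembler ever sees the file. */
#if ENV_X86_64
.code64                         /* assemble what follows as 64-bit code */
sketch_entry:
	mov	%rdi, %rax      /* first argument arrives in %rdi */
#else
.code32                         /* assemble what follows as 32-bit code */
sketch_entry:
	movl	4(%esp), %eax   /* first argument arrives on the stack */
#endif
```

This is also why the 64-bit paths in the diff read arguments from %rdi while the 32-bit paths read them from the stack: the calling conventions differ, and the preprocessor picks the matching code at build time.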