Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/64bit/entry64.inc      | 2 +-
-rw-r--r--  src/cpu/x86/lapic/lapic_cpu_init.c | 2 +-
-rw-r--r--  src/cpu/x86/sipi_vector.S          | 2 +-
-rw-r--r--  src/cpu/x86/smm/smm_stub.S         | 4 ++--
-rw-r--r--  src/cpu/x86/smm/smmhandler.S       | 6 +++---
5 files changed, 8 insertions, 8 deletions
diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc
index 70255173f1..7da68b47f9 100644
--- a/src/cpu/x86/64bit/entry64.inc
+++ b/src/cpu/x86/64bit/entry64.inc
@@ -9,7 +9,7 @@
  * Clobbers: eax, ecx, edx
  */
 
-#if defined(__x86_64__)
+#if ENV_X86_64
 .code32
 #if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0
 #error pagetables must be 4KiB aligned!
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
index 295f583b27..e141ad8ec9 100644
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ b/src/cpu/x86/lapic/lapic_cpu_init.c
@@ -38,7 +38,7 @@ static int lowmem_backup_size;
 static inline void setup_secondary_gdt(void)
 {
 	u16 *gdt_limit;
-#ifdef __x86_64__
+#if ENV_X86_64
 	u64 *gdt_base;
 #else
 	u32 *gdt_base;
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index d8156b88a8..aa95461ae8 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -214,7 +214,7 @@ load_msr:
 	mov	%eax, %cr4
 #endif
 
-#ifdef __x86_64__
+#if ENV_X86_64
 	/* entry64.inc preserves ebx. */
 #include <cpu/x86/64bit/entry64.inc>
 
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
index 07be047a36..44ee7cb327 100644
--- a/src/cpu/x86/smm/smm_stub.S
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -185,7 +185,7 @@ apicid_end:
 	/* Align stack to 16 bytes. Another 32 bytes are pushed below. */
 	andl	$0xfffffff0, %esp
 
-#ifdef __x86_64__
+#if ENV_X86_64
 	mov	%ecx, %edi
 	/* Backup IA32_EFER. Preserves ebx. */
 	movl	$(IA32_EFER), %ecx
@@ -204,7 +204,7 @@ apicid_end:
 	 * struct arg = { c_handler_params, cpu_num, smm_runtime, canary };
 	 * c_handler(&arg)
 	 */
-#ifdef __x86_64__
+#if ENV_X86_64
 	push	%rbx /* uintptr_t *canary */
 	push	%rcx /* size_t cpu */
 
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 3750e5224a..b7805d06ab 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -43,7 +43,7 @@
 
 #define SMM_HANDLER_OFFSET 0x0000
 
-#if defined(__x86_64__)
+#if ENV_X86_64
 .bss
 ia32efer_backup_eax:
 .long 0
@@ -166,7 +166,7 @@ untampered_lapic:
 	addl	$SMM_STACK_SIZE, %ebx
 	movl	%ebx, %esp
 
-#if defined(__x86_64__)
+#if ENV_X86_64
 	/* Backup IA32_EFER. Preserves ebx. */
 	movl	$(IA32_EFER), %ecx
 	rdmsr
@@ -180,7 +180,7 @@ untampered_lapic:
 	/* Call C handler */
 	call	smi_handler
 
-#if defined(__x86_64__)
+#if ENV_X86_64
 	/*
 	 * The only reason to go back to protected mode is that RSM doesn't restore
 	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
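
For context: the patch replaces compiler-provided __x86_64__ guards with coreboot's ENV_X86_64, a macro that is always defined to 0 or 1 by the build environment headers, so it can be tested with #if rather than #ifdef. Below is a minimal standalone sketch of that 0/1 guard-macro pattern; it is illustrative only and not taken from the patch, and the fallback definition is a stand-in for coreboot's real header (assumed here to be <rules.h>).

/* guard_macro_demo.c - illustrative sketch, not part of the commit.
 * Demonstrates the always-defined 0/1 guard pattern that ENV_X86_64 follows.
 */
#include <stdio.h>

/* Stand-in for the build-environment header; the real macro comes from
 * the coreboot build system, not from this fallback. */
#ifndef ENV_X86_64
#if defined(__x86_64__)
#define ENV_X86_64 1
#else
#define ENV_X86_64 0
#endif
#endif

int main(void)
{
#if ENV_X86_64
	puts("64-bit build: long-mode code paths are compiled in");
#else
	puts("32-bit build: long-mode code paths are compiled out");
#endif
	return 0;
}

Because the macro is always defined, building with -Wundef turns a misspelled guard into a warning instead of a silently-false condition, which is one practical reason 0/1 guards tested with #if are generally preferred over #ifdef on a compiler-defined symbol.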