author     Patrick Rudolph <siro@das-labor.org>    2019-10-25 08:09:33 +0200
committer  Aaron Durbin <adurbin@chromium.org>     2019-10-29 19:41:19 +0000
commit     776da0872efcc58543afbe32fc55202521ef0014
tree       8c7d4a952693a77634267779293a2458e51fdd84
parent     5f2576089fca9b5b7c00d1dde1223d80fe9c9e2f
arch/x86/*.S: use defines instead of hardcoded values
As preparation for x86_64, clean up the assembly code and introduce
arch/ram_segs.h, similar to the existing arch/rom_segs.h. Replace
open-coded segment selector values with the defines from the new header.

Change-Id: Ib006cd4df59951335506b8153e9347450ec3403e
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/36321
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
-rw-r--r--  src/arch/x86/c_start.S                |  7
-rw-r--r--  src/arch/x86/include/arch/ram_segs.h  | 25
-rw-r--r--  src/cpu/x86/lapic/secondary.S         |  7
-rw-r--r--  src/cpu/x86/sipi_vector.S             |  9
-rw-r--r--  src/device/oprom/realmode/x86_asm.S   | 26
5 files changed, 50 insertions, 24 deletions
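For orientation before the diff: each of these selector values encodes a GDT slot. On x86, a segment selector is (index << 3) | (TI << 2) | RPL, so RAM_CODE_SEG (0x10) refers to GDT entry 2 and RAM_DATA_SEG (0x18) to entry 3, and, as the comment removed from sipi_vector.S notes, the values have to match the GDT entries set up in c_start.S. The following is only an illustrative, standalone C sketch (not part of the patch) that decodes the new defines:

/* Standalone sketch: decode the ram_segs.h selector values into
 * GDT index, table indicator (0 = GDT) and requested privilege level. */
#include <stdio.h>

static void describe(const char *name, unsigned sel)
{
	printf("%-18s 0x%02x -> GDT index %u, TI %u, RPL %u\n",
	       name, sel, sel >> 3, (sel >> 2) & 1, sel & 3);
}

int main(void)
{
	describe("RAM_CODE_SEG",      0x10);	/* GDT entry 2 */
	describe("RAM_DATA_SEG",      0x18);	/* GDT entry 3 */
	describe("RAM_CODE16_SEG",    0x28);	/* GDT entry 5 */
	describe("RAM_DATA16_SEG",    0x30);	/* GDT entry 6 */
	describe("RAM_CODE_ACPI_SEG", 0x38);	/* GDT entry 7 */
	describe("RAM_DATA_ACPI_SEG", 0x40);	/* GDT entry 8 */
	describe("RAM_CODE_SEG64",    0x48);	/* GDT entry 9 */
	return 0;
}

The patch itself changes no selector values; it only moves these numbers into one header instead of repeating them as magic constants across the .S files below.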
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 32b848df9b..43d78020e1 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -12,6 +12,7 @@
*/
#include <cpu/x86/post_code.h>
+#include <arch/ram_segs.h>
/* Place the stack in the bss section. It's not necessary to define it in the
* the linker script. */
@@ -42,16 +43,16 @@ _start:
cli
lgdt %cs:gdtaddr
#ifndef __x86_64__
- ljmp $0x10, $1f
+ ljmp $RAM_CODE_SEG, $1f
#endif
-1: movl $0x18, %eax
+1: movl $RAM_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
movl %eax, %fs
movl %eax, %gs
#ifdef __x86_64__
- mov $0x48, %ecx
+ mov $RAM_CODE_SEG64, %ecx
call SetCodeSelector
#endif
diff --git a/src/arch/x86/include/arch/ram_segs.h b/src/arch/x86/include/arch/ram_segs.h
new file mode 100644
index 0000000000..39d0c64896
--- /dev/null
+++ b/src/arch/x86/include/arch/ram_segs.h
@@ -0,0 +1,25 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef RAM_SEGS_H
+#define RAM_SEGS_H
+
+#define RAM_CODE_SEG 0x10
+#define RAM_DATA_SEG 0x18
+#define RAM_CODE16_SEG 0x28
+#define RAM_DATA16_SEG 0x30
+#define RAM_CODE_ACPI_SEG 0x38
+#define RAM_DATA_ACPI_SEG 0x40
+#define RAM_CODE_SEG64 0x48
+
+#endif /* RAM_SEGS_H */
diff --git a/src/cpu/x86/lapic/secondary.S b/src/cpu/x86/lapic/secondary.S
index 48360ad532..09cd6f7c30 100644
--- a/src/cpu/x86/lapic/secondary.S
+++ b/src/cpu/x86/lapic/secondary.S
@@ -13,6 +13,7 @@
#include <cpu/x86/mtrr.h>
#include <cpu/x86/lapic_def.h>
+#include <arch/ram_segs.h>
.text
.globl _secondary_start, _secondary_start_end, _secondary_gdt_addr
@@ -38,7 +39,7 @@ _secondary_start:
orl $0x60000001, %eax /* CD, NW, PE = 1 */
movl %eax, %cr0
- ljmpl $0x10, $__ap_protected_start
+ ljmpl $RAM_CODE_SEG, $__ap_protected_start
/* This will get filled in by C code. */
_secondary_gdt_addr:
@@ -51,11 +52,11 @@ _secondary_start_end:
ap_protected_start:
.code32
lgdt gdtaddr
- ljmpl $0x10, $__ap_protected_start
+ ljmpl $RAM_CODE_SEG, $__ap_protected_start
__ap_protected_start:
- movw $0x18, %ax
+ movw $RAM_DATA_SEG, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index edc1e779be..f75a1c9815 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -15,15 +15,12 @@
#include <cpu/x86/cr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>
+#include <arch/ram_segs.h>
/* The SIPI vector is responsible for initializing the APs in the system. It
* loads microcode, sets up MSRs, and enables caching before calling into
* C code. */
-/* These segment selectors need to match the gdt entries in c_start.S. */
-#define CODE_SEG 0x10
-#define DATA_SEG 0x18
-
.section ".module_parameters", "aw", @progbits
ap_start_params:
gdtaddr:
@@ -83,10 +80,10 @@ _start:
orl $CR0_SET_FLAGS, %eax
movl %eax, %cr0
- ljmpl $CODE_SEG, $1f
+ ljmpl $RAM_CODE_SEG, $1f
1:
.code32
- movw $DATA_SEG, %ax
+ movw $RAM_DATA_SEG, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
diff --git a/src/device/oprom/realmode/x86_asm.S b/src/device/oprom/realmode/x86_asm.S
index 8c9e12b143..d68fdc5fca 100644
--- a/src/device/oprom/realmode/x86_asm.S
+++ b/src/device/oprom/realmode/x86_asm.S
@@ -14,6 +14,8 @@
#define REALMODE_BASE 0x600
#define RELOCATED(x) (x - __realmode_code + REALMODE_BASE)
+#include <arch/ram_segs.h>
+
/* CR0 bits */
#define PE (1 << 0)
@@ -106,7 +108,7 @@ __realmode_call:
movl %eax, __registers + 20 /* edi */
/* Activate the right segment descriptor real mode. */
- ljmp $0x28, $RELOCATED(1f)
+ ljmp $RAM_CODE16_SEG, $RELOCATED(1f)
1:
.code16
/* 16 bit code from here on... */
@@ -116,7 +118,7 @@ __realmode_call:
* configurations (limits, writability, etc.) once
* protected mode is turned off.
*/
- mov $0x30, %ax
+ mov $RAM_DATA16_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
@@ -186,10 +188,10 @@ __lcall_instr = RELOCATED(.)
/* Now that we are in protected mode
* jump to a 32 bit code segment.
*/
- ljmpl $0x10, $RELOCATED(1f)
+ ljmpl $RAM_CODE_SEG, $RELOCATED(1f)
1:
.code32
- mov $0x18, %ax
+ mov $RAM_DATA_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
@@ -242,7 +244,7 @@ __realmode_interrupt:
movl %eax, __registers + 20 /* edi */
/* This configures CS properly for real mode. */
- ljmp $0x28, $RELOCATED(1f)
+ ljmp $RAM_CODE16_SEG, $RELOCATED(1f)
1:
.code16 /* 16 bit code from here on... */
@@ -250,7 +252,7 @@ __realmode_interrupt:
* descriptors. They will retain these configurations (limits,
* writability, etc.) once protected mode is turned off.
*/
- mov $0x30, %ax
+ mov $RAM_DATA16_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
@@ -314,10 +316,10 @@ __intXX_instr = RELOCATED(.)
movl %eax, %cr0
/* Now that we are in protected mode jump to a 32-bit code segment. */
- ljmpl $0x10, $RELOCATED(1f)
+ ljmpl $RAM_CODE_SEG, $RELOCATED(1f)
1:
.code32
- mov $0x18, %ax
+ mov $RAM_DATA_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
@@ -363,10 +365,10 @@ __interrupt_handler_16bit = RELOCATED(.)
movl %eax, %cr0
/* ... and jump to a 32 bit code segment. */
- ljmpl $0x10, $RELOCATED(1f)
+ ljmpl $RAM_CODE_SEG, $RELOCATED(1f)
1:
.code32
- mov $0x18, %ax
+ mov $RAM_DATA_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
@@ -380,14 +382,14 @@ __interrupt_handler_16bit = RELOCATED(.)
call *%eax
/* Now return to real mode ... */
- ljmp $0x28, $RELOCATED(1f)
+ ljmp $RAM_CODE16_SEG, $RELOCATED(1f)
1:
.code16
/* Load the segment registers with properly configured segment
* descriptors. They will retain these configurations (limits,
* writability, etc.) once protected mode is turned off.
*/
- mov $0x30, %ax
+ mov $RAM_DATA16_SEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs