Diffstat (limited to 'src')
-rw-r--r--  src/Kconfig                                  2
-rw-r--r--  src/arch/x86/c_start.S                       6
-rw-r--r--  src/arch/x86/include/arch/cpu.h              7
-rw-r--r--  src/cpu/x86/Kconfig                          9
-rw-r--r--  src/security/intel/txt/getsec_enteraccs.S    4
5 files changed, 3 insertions(+), 25 deletions(-)
diff --git a/src/Kconfig b/src/Kconfig
index b33ea6253e..08b24371bd 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -715,7 +715,7 @@ config TIMER_QUEUE
config COOP_MULTITASKING
def_bool n
select TIMER_QUEUE
- depends on ARCH_X86 && CPU_INFO_V2
+ depends on ARCH_X86
help
Cooperative multitasking allows callbacks to be multiplexed on the
main thread. With this enabled it allows for multiple execution paths
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 84fbed2956..02a9b8933a 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -80,7 +80,6 @@ _start:
push_cpu_info
-#if CONFIG(CPU_INFO_V2)
/* Allocate the per_cpu_segment_data on the stack */
push_per_cpu_segment_data
@@ -93,7 +92,6 @@ _start:
mov $per_cpu_segment_selector, %eax
movl (%eax), %eax
mov %eax, %gs
-#endif
/*
* Now we are finished. Memory is up, data is copied and
@@ -222,7 +220,6 @@ gdt:
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0xaf, 0x00
#endif
-#if CONFIG(CPU_INFO_V2)
per_cpu_segment_descriptors:
.rept CONFIG_MAX_CPUS
/* flat data segment */
@@ -233,14 +230,11 @@ per_cpu_segment_descriptors:
.byte 0x00, 0x93, 0xcf, 0x00
#endif
.endr
-#endif /* CPU_INFO_V2 */
gdt_end:
-#if CONFIG(CPU_INFO_V2)
/* Segment selector pointing to the first per_cpu_segment_descriptor. */
per_cpu_segment_selector:
.long per_cpu_segment_descriptors - gdt
-#endif /* CPU_INFO_V2 */
.section ".text._start", "ax", @progbits
#if ENV_X86_64
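
The descriptors emitted by the .rept block above are ordinary flat data segments; the value stored at per_cpu_segment_selector is simply the byte offset of the first of them from the start of the gdt, which with the RPL and TI bits at zero is a valid selector for it. As a standalone illustration (plain C, not coreboot code), the following decodes the descriptor bytes shown in the hunk (.word 0xffff, 0x0000 / .byte 0x00, 0x93, 0xcf, 0x00):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* Raw descriptor bytes in memory order, as laid out by the .word/.byte directives. */
	const uint8_t d[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x93, 0xcf, 0x00 };

	uint32_t limit = d[0] | ((uint32_t)d[1] << 8) | ((uint32_t)(d[6] & 0x0f) << 16);
	uint32_t base  = d[2] | ((uint32_t)d[3] << 8) | ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
	int granularity = (d[6] >> 7) & 1;		/* 1: limit counted in 4 KiB units */

	printf("base=0x%08" PRIx32 " limit=0x%05" PRIx32 " G=%d access=0x%02x\n",
	       base, limit, granularity, (unsigned)d[5]);
	return 0;
}

Running it prints base=0x00000000 limit=0xfffff G=1 access=0x93, i.e. a present, ring-0, writable data segment spanning the full 4 GiB address space.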
diff --git a/src/arch/x86/include/arch/cpu.h b/src/arch/x86/include/arch/cpu.h
index 2d2879f6db..0087a795ec 100644
--- a/src/arch/x86/include/arch/cpu.h
+++ b/src/arch/x86/include/arch/cpu.h
@@ -149,8 +149,6 @@ struct per_cpu_segment_data {
static inline struct cpu_info *cpu_info(void)
{
-/* We use a #if because we don't want to mess with the &s below. */
-#if CONFIG(CPU_INFO_V2)
struct cpu_info *ci = NULL;
__asm__("mov %%gs:%c[offset], %[ci]"
@@ -159,11 +157,6 @@ static inline struct cpu_info *cpu_info(void)
);
return ci;
-#else
- char s;
- uintptr_t info = ALIGN_UP((uintptr_t)&s, CONFIG_STACK_SIZE) - sizeof(struct cpu_info);
- return (struct cpu_info *)info;
-#endif /* CPU_INFO_V2 */
}
struct cpuinfo_x86 {
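
With the #else branch gone, the %gs-based read is the only implementation of cpu_info(). A minimal self-contained sketch of that lookup follows; the single-member layout of struct per_cpu_segment_data is an assumption made for illustration (the real definition sits just above this hunk in arch/cpu.h):

#include <stddef.h>

struct cpu_info;				/* opaque here; defined in arch/cpu.h */

struct per_cpu_segment_data {
	struct cpu_info *cpu_info;		/* assumed member; the asm only needs its offset */
};

static inline struct cpu_info *cpu_info_sketch(void)
{
	struct cpu_info *ci = NULL;

	/* Each CPU's %gs base points at its own per_cpu_segment_data, so the
	 * same load yields a different cpu_info pointer on every CPU. */
	__asm__("mov %%gs:%c[offset], %[ci]"
		: [ci] "=r" (ci)
		: [offset] "i" (offsetof(struct per_cpu_segment_data, cpu_info))
	);
	return ci;
}

The %c operand modifier makes the offset print as a bare number, so the instruction assembles as a load from %gs:0 rather than taking an immediate operand.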
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index b136e9ee5b..1d6b489ef1 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -2,7 +2,6 @@ if ARCH_X86
config PARALLEL_MP
def_bool y
- select CPU_INFO_V2
help
This option uses common MP infrastructure for bringing up APs
in parallel. It additionally provides a more flexible mechanism
@@ -210,12 +209,4 @@ config RESERVE_MTRRS_FOR_OS
However, modern OSes use PAT to control cacheability instead of
using MTRRs.
-config CPU_INFO_V2
- bool
- depends on PARALLEL_MP
- help
- Enables the new method of locating struct cpu_info. This new method
- uses the %gs segment to locate the cpu_info pointer. The old method
- relied on the stack being CONFIG_STACK_SIZE aligned.
-
endif # ARCH_X86
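
For contrast, the help text deleted above describes the retired lookup: struct cpu_info lived at the top of a CONFIG_STACK_SIZE-aligned stack, so rounding the address of any local variable up to the next stack boundary and stepping back by sizeof(struct cpu_info) found it. A self-contained sketch of that old method, with placeholder values and fields standing in for the real coreboot definitions:

#include <stdint.h>

#define CONFIG_STACK_SIZE	0x1000		/* assumed value, for illustration only */
#define ALIGN_UP(x, a)		(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct cpu_info {				/* placeholder fields */
	void *cpu;
	unsigned long index;
};

static inline struct cpu_info *cpu_info_old(void)
{
	char s;					/* any local lands inside the current stack */
	uintptr_t info = ALIGN_UP((uintptr_t)&s, CONFIG_STACK_SIZE) - sizeof(struct cpu_info);

	return (struct cpu_info *)info;
}

The scheme only works while every stack is exactly CONFIG_STACK_SIZE bytes and aligned to that size, which is the constraint the %gs-based method removes.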
diff --git a/src/security/intel/txt/getsec_enteraccs.S b/src/security/intel/txt/getsec_enteraccs.S
index cbb24b6511..ff9db05f06 100644
--- a/src/security/intel/txt/getsec_enteraccs.S
+++ b/src/security/intel/txt/getsec_enteraccs.S
@@ -227,7 +227,7 @@ cond_clear_var_mtrrs:
movd %esp, %xmm0
movd %ebp, %xmm1
- /* Backup %gs used by CPU_INFO_V2 */
+ /* Backup %gs used by cpu_info() */
movl %gs, %eax
movd %eax, %xmm2
@@ -265,7 +265,7 @@ cond_clear_var_mtrrs:
movl %eax, %es
movl %eax, %ss
movl %eax, %fs
- /* Restore %gs used by CPU_INFO_V2 */
+ /* Restore %gs used by cpu_info */
movd %xmm2, %eax
movl %eax, %gs
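
Since cpu_info() now always reads through %gs, any flow that reloads the segment registers, as the GETSEC sequence above does, has to preserve that selector; the assembly parks it in %xmm2 because the stack is not usable at that point. A hedged, standalone C sketch of the same save-and-restore idea:

#include <stdint.h>

static inline uint16_t read_gs(void)
{
	uint16_t sel;

	__asm__ volatile("mov %%gs, %0" : "=r" (sel));
	return sel;
}

static inline void write_gs(uint16_t sel)
{
	__asm__ volatile("mov %0, %%gs" : : "r" (sel) : "memory");
}

static inline void with_reloaded_segments(void (*body)(void))
{
	uint16_t saved = read_gs();	/* back up %gs before segments are rewritten */

	body();				/* may reload %ds/%es/%ss/%fs/%gs */
	write_gs(saved);		/* restore it, or the next cpu_info() reads a stale segment */
}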