author     Kyösti Mälkki <kyosti.malkki@gmail.com>  2020-07-19 20:04:24 +0300
committer  Patrick Georgi <pgeorgi@google.com>      2020-07-26 21:00:13 +0000
commit     0199d3bd7f60109796e8f97ac1bb4df0a237f7f1 (patch)
tree       588114ef227b08fe7ac119951c575809d546c79f /src/arch/x86/include
parent     c73178892902f43f74ad29d65e1ce8775c2d800e (diff)
arch/x86: Move cpu_relax()
It's not related to spinlocks, and the actual implementation was
also guarded by CONFIG(SMP). With a single call site in x86-specific
code, empty stubs for other architectures are currently not
necessary.

Also drop an unused include on a nearby line.

Change-Id: I00439e9c1d10c943ab5e404f5d687d316768fa16
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/43808
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
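For context, a minimal sketch of the kind of busy-wait loop cpu_relax()
is intended for. It is illustrative only: wait_for_flag and the flag
variable are invented for this example and are not the single x86 call
site mentioned above.

#include <arch/cpu.h>

/* Hypothetical example: spin until another CPU sets *flag.  The
 * PAUSE emitted by cpu_relax() hints to the core that this is a
 * spin-wait loop, reducing power draw and avoiding a pipeline flush
 * when the wait ends; the "memory" clobber in its asm also keeps
 * the compiler from caching *flag across iterations. */
static void wait_for_flag(volatile const int *flag)
{
	while (!*flag)
		cpu_relax();
}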
Diffstat (limited to 'src/arch/x86/include')
-rw-r--r--  src/arch/x86/include/arch/cpu.h           | 6 ++++++
-rw-r--r--  src/arch/x86/include/arch/smp/spinlock.h  | 7 -------
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/src/arch/x86/include/arch/cpu.h b/src/arch/x86/include/arch/cpu.h
index 82f470ec2d..b622465a25 100644
--- a/src/arch/x86/include/arch/cpu.h
+++ b/src/arch/x86/include/arch/cpu.h
@@ -269,6 +269,12 @@ static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
}
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static __always_inline void cpu_relax(void)
+{
+ __asm__ __volatile__("rep;nop" : : : "memory");
+}
+
#define asmlinkage __attribute__((regparm(0)))
/*
diff --git a/src/arch/x86/include/arch/smp/spinlock.h b/src/arch/x86/include/arch/smp/spinlock.h
index 41189933d9..a05d47a233 100644
--- a/src/arch/x86/include/arch/smp/spinlock.h
+++ b/src/arch/x86/include/arch/smp/spinlock.h
@@ -62,12 +62,6 @@ static __always_inline void spin_unlock(spinlock_t *lock)
: "=m" (lock->lock) : : "memory");
}
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void cpu_relax(void)
-{
- __asm__ __volatile__("rep;nop" : : : "memory");
-}
-
#else
#define DECLARE_SPIN_LOCK(x)
@@ -76,7 +70,6 @@ static __always_inline void cpu_relax(void)
#define spin_unlock_wait(lock) do {} while (0)
#define spin_lock(lock) do {} while (0)
#define spin_unlock(lock) do {} while (0)
-#define cpu_relax() do {} while (0)
#endif
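One practical consequence, shown with a hypothetical caller rather than
code from this commit: anything that previously picked up cpu_relax()
through the spinlock header now needs the CPU header instead. And with
the do {} while (0) stub for !CONFIG(SMP) gone, every x86 build gets
the real PAUSE-based implementation, which is harmless because the
REP NOP encoding executes as an ordinary NOP where the spin-wait hint
does not apply.

/* Hypothetical caller, updated for this commit; the header swap is
 * the only change a user of cpu_relax() needs. */
#include <arch/cpu.h>	/* previously reachable via <arch/smp/spinlock.h> */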