From e807c34a5e34e8dd7cb959458de593ea1070fde4 Mon Sep 17 00:00:00 2001
From: Duncan Laurie
Date: Mon, 10 Jun 2013 09:53:33 -0700
Subject: cmos post: Guard with spinlock

The CMOS post code storage mechanism does back-to-back CMOS reads and
writes that may be interleaved during CPU bringup, leading to
corruption of the log or of other parts of CMOS.

Change-Id: I704813cc917a659fe034b71c2ff9eb9b80f7c949
Signed-off-by: Duncan Laurie
Reviewed-on: https://gerrit.chromium.org/gerrit/58102
Reviewed-by: Aaron Durbin
Reviewed-on: http://review.coreboot.org/4227
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer
---
 src/arch/x86/include/arch/smp/spinlock.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/arch/x86/include/arch/smp/spinlock.h b/src/arch/x86/include/arch/smp/spinlock.h
index 5c1dd94c36..32be2f25fb 100644
--- a/src/arch/x86/include/arch/smp/spinlock.h
+++ b/src/arch/x86/include/arch/smp/spinlock.h
@@ -1,6 +1,8 @@
 #ifndef ARCH_SMP_SPINLOCK_H
 #define ARCH_SMP_SPINLOCK_H
 
+#ifndef __PRE_RAM__
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
@@ -61,4 +63,16 @@ static inline __attribute__((always_inline)) void cpu_relax(void)
 	__asm__ __volatile__("rep;nop": : :"memory");
 }
 
+#else /* !__PRE_RAM__ */
+
+#define DECLARE_SPIN_LOCK(x)
+#define barrier() do {} while(0)
+#define spin_is_locked(lock) 0
+#define spin_unlock_wait(lock) do {} while(0)
+#define spin_lock(lock) do {} while(0)
+#define spin_unlock(lock) do {} while(0)
+#define cpu_relax() do {} while(0)
+
+#endif /* !__PRE_RAM__ */
+
 #endif /* ARCH_SMP_SPINLOCK_H */
-- 
cgit v1.2.3
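
For context: the new pre-RAM branch turns the lock macros into no-ops
because pre-RAM stages execute on the boot CPU only, so no mutual
exclusion is needed there, while ramstage keeps the real spinlock. The
sketch below illustrates how a caller might guard the back-to-back CMOS
index/data port accesses the commit message describes. It is a minimal
sketch, not the actual coreboot implementation: cmos_post_code(),
cmos_read_byte(), cmos_write_byte() and CMOS_POST_BANK are hypothetical
names chosen for illustration; only ports 0x70/0x71 (the standard
RTC/CMOS index and data ports) and the spinlock macros above come from
the source.

/*
 * Usage sketch for the spinlock macros above. The helpers and the
 * CMOS_POST_BANK offset are hypothetical, for illustration only.
 */
#include <stdint.h>
#include <arch/io.h>		/* coreboot x86 inb()/outb() */
#include <arch/smp/spinlock.h>

#define RTC_INDEX_PORT	0x70	/* standard RTC/CMOS index port */
#define RTC_DATA_PORT	0x71	/* standard RTC/CMOS data port */
#define CMOS_POST_BANK	0x40	/* hypothetical offset of the post log */

/* Expands to a real spinlock in ramstage, to nothing in pre-RAM stages. */
DECLARE_SPIN_LOCK(cmos_post_lock)

static uint8_t cmos_read_byte(uint8_t offset)
{
	outb(offset, RTC_INDEX_PORT);	/* select the CMOS register... */
	return inb(RTC_DATA_PORT);	/* ...then read its contents */
}

static void cmos_write_byte(uint8_t offset, uint8_t value)
{
	outb(offset, RTC_INDEX_PORT);
	outb(value, RTC_DATA_PORT);
}

void cmos_post_code(uint8_t value)
{
	/*
	 * Each CMOS access is an index write followed by a data access.
	 * If two CPUs interleave these pairs during bringup, one CPU's
	 * data access can land on the register the other CPU selected,
	 * corrupting the log or unrelated CMOS contents. The lock
	 * serializes the whole read-modify-write sequence; in pre-RAM
	 * builds it compiles away, which is safe on the single boot CPU.
	 */
	spin_lock(&cmos_post_lock);
	uint8_t slot = cmos_read_byte(CMOS_POST_BANK);	   /* next free slot */
	cmos_write_byte(CMOS_POST_BANK + 1 + slot, value); /* store the code */
	cmos_write_byte(CMOS_POST_BANK, slot + 1);	   /* advance cursor */
	spin_unlock(&cmos_post_lock);
}

Note that because spin_lock() and spin_unlock() discard their argument
entirely in the pre-RAM branch, the empty DECLARE_SPIN_LOCK(x) is
sufficient: the lock identifier is never evaluated there, so the same
call sites compile unchanged in every stage.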