author    | Stefan Reinauer <stefan.reinauer@coreboot.org> | 2012-12-07 17:15:04 -0800
committer | Ronald G. Minnich <rminnich@gmail.com>         | 2012-12-08 06:53:19 +0100
commit    | 52db0b984523047da19ca3b41558b9dbf45abad7 (patch)
tree      | 5ed389bb233d5b007593ede56040ccf268e37bbe /src/arch/armv7/include/smp
parent    | 509f77277cfccdae897f0d369672ce0818ecdf88 (diff)
WIP: Initial ARMv7 architecture implementation in coreboot
The first ARMv7 CPU we're going to support is the Exynos 5250
used in the Google Snow ChromeBook.
Change-Id: I4de8433bbc6202eb8fef2556a11186a3376d411b
Signed-off-by: David Hendricks <dhendrix@chromium.org>
Signed-off-by: Stefan Reinauer <reinauer@google.com>
Signed-off-by: Ronald G. Minnich <rminnich@gmail.com>
Reviewed-on: http://review.coreboot.org/2004
Tested-by: build bot (Jenkins)
Diffstat (limited to 'src/arch/armv7/include/smp')
-rw-r--r-- | src/arch/armv7/include/smp/spinlock.h | 52
1 file changed, 52 insertions, 0 deletions
diff --git a/src/arch/armv7/include/smp/spinlock.h b/src/arch/armv7/include/smp/spinlock.h
new file mode 100644
index 0000000000..1dc397c19c
--- /dev/null
+++ b/src/arch/armv7/include/smp/spinlock.h
@@ -0,0 +1,52 @@
+#ifndef ARCH_SMP_SPINLOCK_H
+#define ARCH_SMP_SPINLOCK_H
+
+/* FIXME: implement this for ARM */
+#error "implement this for ARM"
+#if 0
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
+#define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED;
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) != 0)
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+
+static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock)
+{
+	unsigned long tmp;
+	__asm__ __volatile__ (
+		"1:	ldrex	%0, [%1]\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%0, %2, [%1]\n"
+		"	teqeq	%0, #0\n"
+		"	bne	1b\n"
+		: "=&r" (tmp)
+		: "r" (&lock->lock), "r" (1)
+		: "cc"
+	);
+	barrier();
+}
+
+static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__(
+		"	str	%1, [%0]\n"
+		:
+		: "r" (&lock->lock), "r" (0)
+		: "cc"
+	);
+}
+
+#define cpu_relax() barrier()
+
+#endif
+#endif /* ARCH_SMP_SPINLOCK_H */
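The whole implementation above sits behind `#if 0` and an unconditional `#error` placeholder, so any file including this header fails to build until a real ARM implementation lands. The disabled `spin_lock` sketches ARMv7's load-exclusive/store-conditional primitive: LDREX marks the lock word for exclusive access, STREXEQ only succeeds if no other observer wrote it in between, and `bne 1b` retries until the lock is taken atomically. A minimal, hypothetical usage sketch of that API follows; it is not part of this commit, it assumes the `#if 0` block is eventually enabled and the header is reachable as `<smp/spinlock.h>`, and the names `console_lock` and `locked_putc` are illustrative only.

```c
/*
 * Hypothetical usage sketch (not part of this change): how the API
 * declared above would be used once the #if 0 block is enabled.
 * The include path and the names console_lock/locked_putc are
 * assumptions for illustration only.
 */
#include <smp/spinlock.h>

DECLARE_SPIN_LOCK(console_lock)	/* expands to a static spinlock_t definition */

void locked_putc(void (*putc)(char), char c)
{
	spin_lock(&console_lock);	/* spins in the LDREX/STREX loop until acquired */
	putc(c);			/* critical section: one CPU at a time */
	spin_unlock(&console_lock);	/* plain store of 0 releases the lock */
}
```

Until the `#error` is replaced, that include is the deliberate reminder that SMP locking for ARM is still to be done.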