summaryrefslogtreecommitdiff
path: root/src/arch/arm64
diff options
context:
space:
mode:
authorAaron Durbin <adurbin@chromium.org>2014-09-06 01:24:38 -0500
committerPatrick Georgi <pgeorgi@google.com>2015-03-27 08:04:47 +0100
commitdbab8cbc31ffbcf4193defae4e8fd7d26e8ae519 (patch)
tree77824993fc42fa04c8cf4173f235121e02cfb8d8 /src/arch/arm64
parentef4419e2cd29bcae4bf42b8230be0baec9e367a9 (diff)
arm64: add spinlock implementation
Provide a simple spinlock implementation for arm64. A value of 0 is unlocked and a value of 1 is locked. BUG=chrome-os-partner:31761 BRANCH=None TEST=Built and ran SMP bringup on ryu. Change-Id: Ie88a715a6b51cd38a5fdd830583dae528cc49d67 Signed-off-by: Patrick Georgi <pgeorgi@chromium.org> Original-Commit-Id: 14dab94610c96d6b1530c64d661833f8e613101c Original-Change-Id: I3bf2d80b91112d04442455ff0fa3f16900b7327f Original-Signed-off-by: Aaron Durbin <adurbin@chromium.org> Original-Reviewed-on: https://chromium-review.googlesource.com/216923 Original-Reviewed-by: Furquan Shaikh <furquan@chromium.org> Reviewed-on: http://review.coreboot.org/9040 Tested-by: build bot (Jenkins) Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Diffstat (limited to 'src/arch/arm64')
-rw-r--r--src/arch/arm64/include/armv8/arch/smp/spinlock.h25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/src/arch/arm64/include/armv8/arch/smp/spinlock.h b/src/arch/arm64/include/armv8/arch/smp/spinlock.h
index 8a89d1f011..10278e72ae 100644
--- a/src/arch/arm64/include/armv8/arch/smp/spinlock.h
+++ b/src/arch/arm64/include/armv8/arch/smp/spinlock.h
@@ -1,6 +1,29 @@
#ifndef ARCH_SMP_SPINLOCK_H
#define ARCH_SMP_SPINLOCK_H
-#error "spinlocks: implement this for ARM64"
+#include <arch/barrier.h>
+#include <stdint.h>
+
+typedef struct {
+ volatile uint32_t lock; /* 0 = unlocked, 1 = locked (per commit message above) */
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } /* compound-literal initializer: lock starts clear */
+#define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED; /* note: macro supplies the trailing ';' */
+
+static inline void spin_lock(spinlock_t *spin) /* busy-wait acquire via exclusive-monitor load/store pair */
+{
+ while (1) {
+ if (load_acquire_exclusive(&spin->lock) != 0) /* lock observed held: keep spinning */
+ continue;
+ if (store_release_exclusive(&spin->lock, 1)) /* NOTE(review): treats nonzero return as "exclusive store succeeded" — raw stxr status is 0-on-success, so confirm arch/barrier.h inverts it */
+ break;
+ }
+}
+
+static inline void spin_unlock(spinlock_t *spin) /* release the lock by clearing it */
+{
+ store_release(&spin->lock, 0); /* release store: presumably orders critical-section accesses before the clear — semantics defined in arch/barrier.h */
+}
#endif