author    Arthur Heymans <arthur@aheymans.xyz>    2021-12-30 21:12:35 +0100
committer Arthur Heymans <arthur@aheymans.xyz>    2022-01-15 14:48:46 +0000
commit    ac24a96579d1b26978081b7cf29874474aabc525 (patch)
tree      99cc791f715223308a128b4184f2fda07132f188
parent    395f5b3129ad046a220aa4cf975279bfdebc19df (diff)
arch/x86/spinlock.h: Support systems with >128 cores
Each time the spinlock is acquired, a byte is decremented and the sign
of the result is checked. With more than 128 cores contending for the
lock, the byte can underflow past -128 and wrap around to a positive
value, so the sign check passes even though the lock is not free. An
easy fix is to widen the word size of the spinlock acquire and release
operations from a byte to 32 bits.

TEST: See that serialized SMM relocation is still serialized on
systems with >128 cores.

Change-Id: I76afaa60669335090743d99381280e74aa9fb5b1
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/60539
Reviewed-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
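To illustrate the failure mode, here is a minimal user-space sketch (not
coreboot code; the names are illustrative). It relies on the fact that on
x86 a signed byte decremented past -128 wraps to +127, which is exactly
what repeated "lock ; decb" does to the lock byte under heavy contention:

#include <stdio.h>

int main(void)
{
	signed char lock = 1;	/* 1 == unlocked in this scheme */

	/* Model 130 CPUs each executing "lock ; decb %0" once while
	 * contending for the lock: 1 - 130 = -129, which a byte cannot
	 * represent, so on x86 it wraps around to +127. */
	for (int cpu = 0; cpu < 130; cpu++)
		lock--;

	/* The "js" sign check now sees a non-negative value, so a waiting
	 * CPU would conclude the lock is free while other CPUs still hold
	 * or wait on it. Widening the operation to 32 bits ("decl") makes
	 * this wraparound unreachable for any realistic core count. */
	printf("lock byte after 130 decrements: %d\n", lock);
	return 0;
}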
-rw-r--r--    src/arch/x86/include/arch/smp/spinlock.h    8
1 file changed, 4 insertions, 4 deletions
diff --git a/src/arch/x86/include/arch/smp/spinlock.h b/src/arch/x86/include/arch/smp/spinlock.h
index c7008c1b8c..cb25531b15 100644
--- a/src/arch/x86/include/arch/smp/spinlock.h
+++ b/src/arch/x86/include/arch/smp/spinlock.h
@@ -25,17 +25,17 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
#define barrier() __asm__ __volatile__("" : : : "memory")
-#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x) (*(volatile int *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
#undef barrier
#define spin_lock_string \
"\n1:\t" \
- "lock ; decb %0\n\t" \
+ "lock ; decl %0\n\t" \
"js 2f\n" \
".section .text.lock,\"ax\"\n" \
"2:\t" \
- "cmpb $0,%0\n\t" \
+ "cmpl $0,%0\n\t" \
"rep;nop\n\t" \
"jle 2b\n\t" \
"jmp 1b\n" \
@@ -45,7 +45,7 @@ typedef struct {
* This works. Despite all the confusion.
*/
#define spin_unlock_string \
- "movb $1,%0"
+ "movl $1,%0"
static __always_inline void spin_lock(spinlock_t *lock)
{
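	/*
	 * Sketch of how the strings above are consumed (the rest of this
	 * hunk's context is truncated here; the exact asm constraints are
	 * an assumption, and the lock member of spinlock_t must be at
	 * least 32 bits wide for the widened decl/movl and the
	 * (volatile int *) cast in spin_is_locked to be valid).
	 */
	__asm__ __volatile__(
		spin_lock_string
		: "=m" (lock->lock) : : "memory");
}

static __always_inline void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
		spin_unlock_string
		: "=m" (lock->lock) : : "memory");
}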