author	Arthur Heymans <arthur@aheymans.xyz>	2022-11-17 12:08:31 +0100
committer	Felix Held <felix-coreboot@felixheld.de>	2022-11-24 15:15:41 +0000
commit	5e3798ca484b48843662c84967cc6ac88fa789dd (patch)
tree	a8c972d01ac40965d6009db37d6e4ca40640da8c /src/arch
parent	a9921bcadb7e8c179c656b7b115bce37cb45f0eb (diff)
arch/arm: Use unified assembly syntax
Taken from Linux, which also updated these files. Clang only works with
the unified syntax, so this fixes arm builds.

TESTED on qemu vexpress-a9 and verstage on google/vilboz with
BUILD_TIMELESS=1; the binaries remain the same.

Change-Id: Ia320dc2c460c99d934b8f17dee7748a9def4e750
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/63058
Reviewed-by: Martin L Roth <gaumless@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src/arch')
-rw-r--r--	src/arch/arm/libgcc/lib1funcs.S	6
-rw-r--r--	src/arch/arm/memcpy.S	8
-rw-r--r--	src/arch/arm/memmove.S	22
-rw-r--r--	src/arch/arm/memset.S	40
4 files changed, 42 insertions, 34 deletions
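
In unified assembly language (UAL), the condition code is written after the size or
addressing-mode suffix rather than before it, and the flag-setting "s" comes directly
after the base mnemonic; the ".syntax unified" directive added at the top of each file
tells the assembler to expect these forms. A minimal illustration using mnemonics that
appear in this patch (the snippet itself is not part of the commit):

	@ Divided (pre-UAL) syntax: condition code sits between mnemonic and suffix.
	ldrneb	r3, [r1], #1		@ load byte if NE
	stmneia	ip!, {r1, r3}		@ store multiple (increment after) if NE
	movnes	r0, r1, lsr #4		@ shifted move, set flags, if NE

	@ Unified (UAL) syntax: size/addressing suffix first, condition code last.
	ldrbne	r3, [r1], #1
	stmiane	ip!, {r1, r3}
	movsne	r0, r1, lsr #4

GNU as understands both spellings (depending on the .syntax directive in effect), while
Clang's integrated assembler only accepts the unified forms, which is why the old
spellings broke arm builds with Clang. The instruction encodings are identical either
way, which is why the binaries stay the same under BUILD_TIMELESS=1.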
diff --git a/src/arch/arm/libgcc/lib1funcs.S b/src/arch/arm/libgcc/lib1funcs.S
index 7a8fedf06e..a520ad35f4 100644
--- a/src/arch/arm/libgcc/lib1funcs.S
+++ b/src/arch/arm/libgcc/lib1funcs.S
@@ -8,6 +8,8 @@
#include <arch/asm.h>
+.syntax unified
+
.macro ARM_DIV_BODY dividend, divisor, result, curbit
#if __COREBOOT_ARM_ARCH__ >= 5
@@ -67,7 +69,7 @@
subhs \dividend, \dividend, \divisor, lsr #3
orrhs \result, \result, \curbit, lsr #3
cmp \dividend, #0 @ Early termination?
- movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
+ movsne \curbit, \curbit, lsr #4 @ No, any more bits to do?
movne \divisor, \divisor, lsr #4
bne 1b
@@ -153,7 +155,7 @@
subhs \dividend, \dividend, \divisor, lsr #3
cmp \dividend, #1
mov \divisor, \divisor, lsr #4
- subges \order, \order, #4
+ subsge \order, \order, #4
bge 1b
tst \order, #3
diff --git a/src/arch/arm/memcpy.S b/src/arch/arm/memcpy.S
index d5e28ac147..b46944a9bd 100644
--- a/src/arch/arm/memcpy.S
+++ b/src/arch/arm/memcpy.S
@@ -6,6 +6,8 @@
#include <arch/asm.h>
#include "asmlib.h"
+.syntax unified
+
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@@ -22,7 +24,7 @@
.endm
.macro ldr1b ptr reg cond=al abort
- ldr\cond\()b \reg, [\ptr], #1
+ ldrb\cond \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
@@ -34,7 +36,7 @@
.endm
.macro str1b ptr reg cond=al abort
- str\cond\()b \reg, [\ptr], #1
+ strb\cond \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
@@ -197,7 +199,7 @@ ENTRY(memcpy)
orr r9, r9, ip, push #\push
mov ip, ip, pull #\pull
orr ip, ip, lr, push #\push
- str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
+ str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
diff --git a/src/arch/arm/memmove.S b/src/arch/arm/memmove.S
index 6595beea97..f5f6340b13 100644
--- a/src/arch/arm/memmove.S
+++ b/src/arch/arm/memmove.S
@@ -4,6 +4,8 @@
#include <arch/asm.h>
#include "asmlib.h"
+.syntax unified
+
/*
* Prototype: void *memmove(void *dest, const void *src, size_t n);
*
@@ -88,20 +90,20 @@ ENTRY(memmove)
7: ldmfd sp!, {r5 - r8}
8: movs r2, r2, lsl #31
- ldrneb r3, [r1, #-1]!
- ldrcsb r4, [r1, #-1]!
- ldrcsb ip, [r1, #-1]
- strneb r3, [r0, #-1]!
- strcsb r4, [r0, #-1]!
- strcsb ip, [r0, #-1]
+ ldrbne r3, [r1, #-1]!
+ ldrbcs r4, [r1, #-1]!
+ ldrbcs ip, [r1, #-1]
+ strbne r3, [r0, #-1]!
+ strbcs r4, [r0, #-1]!
+ strbcs ip, [r0, #-1]
ldmfd sp!, {r0, r4, pc}
9: cmp ip, #2
- ldrgtb r3, [r1, #-1]!
- ldrgeb r4, [r1, #-1]!
+ ldrbgt r3, [r1, #-1]!
+ ldrbge r4, [r1, #-1]!
ldrb lr, [r1, #-1]!
- strgtb r3, [r0, #-1]!
- strgeb r4, [r0, #-1]!
+ strbgt r3, [r0, #-1]!
+ strbge r4, [r0, #-1]!
subs r2, r2, ip
strb lr, [r0, #-1]!
blt 8b
diff --git a/src/arch/arm/memset.S b/src/arch/arm/memset.S
index 5da5353954..4a29646c02 100644
--- a/src/arch/arm/memset.S
+++ b/src/arch/arm/memset.S
@@ -8,6 +8,8 @@
#include <arch/asm.h>
#include "asmlib.h"
+.syntax unified
+
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
@@ -31,20 +33,20 @@ ENTRY(memset)
mov lr, r1
2: subs r2, r2, #64
- stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
- stmgeia ip!, {r1, r3, r8, lr}
- stmgeia ip!, {r1, r3, r8, lr}
- stmgeia ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmiage ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
+ ldmfdeq sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia ip!, {r1, r3, r8, lr}
- stmneia ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}
#else
@@ -77,21 +79,21 @@ ENTRY(memset)
strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia ip!, {r1, r3-r8, lr}
- stmgeia ip!, {r1, r3-r8, lr}
+ stmiage ip!, {r1, r3-r8, lr}
+ stmiage ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r8, pc}
+ ldmfdeq sp!, {r4-r8, pc}
tst r2, #32
- stmneia ip!, {r1, r3-r8, lr}
+ stmiane ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia ip!, {r4-r7}
+ stmiane ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}
#endif
4: tst r2, #8
- stmneia ip!, {r1, r3}
+ stmiane ip!, {r1, r3}
tst r2, #4
strne r1, [ip], #4
/*
@@ -99,17 +101,17 @@ ENTRY(memset)
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [ip], #1
- strneb r1, [ip], #1
+ strbne r1, [ip], #1
+ strbne r1, [ip], #1
tst r2, #1
- strneb r1, [ip], #1
+ strbne r1, [ip], #1
mov pc, lr
6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
- strltb r1, [ip], #1 @ 1
- strleb r1, [ip], #1 @ 1
+ strblt r1, [ip], #1 @ 1
+ strble r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b