author		Gabe Black <gabeblack@google.com>	2013-09-30 23:00:33 -0700
committer	Isaac Christensen <isaac.christensen@se-eng.com>	2014-09-08 18:59:23 +0200
commit		51edd54738b2248e92580caa317aa4e8e1694d40 (patch)
tree		d10c6be921f2d3656d9f504f4572a1c73f7860e3 /payloads/libpayload/arch/arm/memset.S
parent		94b4a266fb4df1f2f59ed8052c150ee4bf3e6d41 (diff)
ARM: Generalize armv7 as arm.
There are ARM systems which are essentially heterogeneous multicores where
some cores implement a different ARM architecture version than other cores.
A specific example is the tegra124 which boots on an ARMv4 coprocessor while
most code, including most of the firmware, runs on the main ARMv7 core. To
support SOCs like this, the plan is to generalize the ARM architecture so
that all versions are available, and an SOC/CPU can then select which
architecture variant should be used for each component of the firmware:
bootblock, romstage, and ramstage.

Old-Change-Id: I22e048c3bc72bd56371e14200942e436c1e312c2
Signed-off-by: Gabe Black <gabeblack@google.com>
Reviewed-on: https://chromium-review.googlesource.com/171338
Reviewed-by: Gabe Black <gabeblack@chromium.org>
Commit-Queue: Gabe Black <gabeblack@chromium.org>
Tested-by: Gabe Black <gabeblack@chromium.org>
(cherry picked from commit 8423a41529da0ff67fb9873be1e2beb30b09ae2d)
Signed-off-by: Isaac Christensen <isaac.christensen@se-eng.com>

ARM: Split out ARMv7 code and make it possible to have other arch versions.

We don't always want to use ARMv7 code when building for ARM, so we should
separate out the ARMv7 code so it can be excluded, and also make it possible
to include code for some other version of the architecture instead, all per
build component for cases where we need more than one architecture version
at a time. The tegra124 bootblock will ultimately need to be ARMv4, but
until we have some ARMv4 code to switch over to we can leave it set to
ARMv7.

Old-Change-Id: Ia982c91057fac9c252397b7c866224f103761cc7
Reviewed-on: https://chromium-review.googlesource.com/171400
Reviewed-by: Gabe Black <gabeblack@chromium.org>
Tested-by: Gabe Black <gabeblack@chromium.org>
Commit-Queue: Gabe Black <gabeblack@chromium.org>
(cherry picked from commit 799514e6060aa97acdcf081b5c48f965be134483)

Squashed two related patches for splitting ARM support into general ARM
support and ARMv7 specific pieces.

Change-Id: Ic6511507953a2223c87c55f90252c4a4e1dd6010
Signed-off-by: Isaac Christensen <isaac.christensen@se-eng.com>
Reviewed-on: http://review.coreboot.org/6782
Tested-by: build bot (Jenkins)
Diffstat (limited to 'payloads/libpayload/arch/arm/memset.S')
-rw-r--r--	payloads/libpayload/arch/arm/memset.S	127
1 file changed, 127 insertions, 0 deletions
diff --git a/payloads/libpayload/arch/arm/memset.S b/payloads/libpayload/arch/arm/memset.S
new file mode 100644
index 0000000000..aa4f57a993
--- /dev/null
+++ b/payloads/libpayload/arch/arm/memset.S
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/arm/lib/memset.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ASM optimised string functions
+ */
+#include "assembler.h"
+
+ .text
+ .align 5
+ .word 0 @ pad so that memset's entry below lands cache-line (32-byte) aligned
+
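+/*
+ * Annotation (not in the upstream Linux file): r0 = destination,
+ * r1 = fill byte, r2 = byte count. r3 holds the destination's
+ * misalignment on entry and later a copy of the fill pattern; ip and
+ * lr carry further pattern copies for the unrolled stm stores.
+ */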
+1: subs r2, r2, #4 @ 1 do we have enough
+ blt 5f @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [r0], #1 @ 1
+ strleb r1, [r0], #1 @ 1
+ strb r1, [r0], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+/*
+ * The pointer is now aligned and the length is adjusted. Try doing the
+ * memset again.
+ */
+
+.type memset, function
+.globl memset
+memset:
+ ands r3, r0, #3 @ 1 unaligned?
+ bne 1b @ 1
+/*
+ * we know that the pointer in r0 is aligned to a word boundary.
+ */
+ orr r1, r1, r1, lsl #8 @ replicate the fill byte into bytes 0-1...
+ orr r1, r1, r1, lsl #16 @ ...and then into all four bytes of r1
+ mov r3, r1 @ second copy of the pattern for stm stores
+ cmp r2, #16
+ blt 4f
+
+#if ! CALGN(1)+0
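+/*
+ * Annotation: in the Linux original, CALGN() is defined in assembler.h
+ * and expands its argument only on CPUs that benefit from cache-line
+ * aligned stores; otherwise it expands to nothing, making the
+ * expression "! +0" true and selecting this simpler path.
+ */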
+
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+ str lr, [sp, #-4]!
+ mov ip, r1
+ mov lr, r1
+
+2: subs r2, r2, #64
+ stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
+ stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia r0!, {r1, r3, ip, lr}
+ bgt 2b
+ ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+ tst r2, #32
+ stmneia r0!, {r1, r3, ip, lr}
+ stmneia r0!, {r1, r3, ip, lr}
+ tst r2, #16
+ stmneia r0!, {r1, r3, ip, lr}
+ ldr lr, [sp], #4
+
+#else
+
+/*
+ * This version aligns the destination pointer in order to write
+ * whole cache lines at once.
+ */
+
+ stmfd sp!, {r4-r7, lr}
+ mov r4, r1
+ mov r5, r1
+ mov r6, r1
+ mov r7, r1
+ mov ip, r1
+ mov lr, r1
+
+ cmp r2, #96 @ aligning to a cache line is only
+ tstgt r0, #31 @ worth it for long, unaligned fills
+ ble 3f
+
+ and ip, r0, #31 @ bytes past the last 32-byte boundary
+ rsb ip, ip, #32 @ bytes needed to reach the next one
+ sub r2, r2, ip
+ movs ip, ip, lsl #(32 - 4) @ shift: C = bit 4, N = bit 3 of that count
+ stmcsia r0!, {r4, r5, r6, r7} @ carry set: store 16 bytes
+ stmmiia r0!, {r4, r5} @ minus set: store 8 bytes
+ tst ip, #(1 << 30) @ bit 30 is now bit 2 of the count
+ mov ip, r1
+ strne r1, [r0], #4 @ set: store the final 4 bytes
+
+3: subs r2, r2, #64
+ stmgeia r0!, {r1, r3-r7, ip, lr}
+ stmgeia r0!, {r1, r3-r7, ip, lr}
+ bgt 3b
+ ldmeqfd sp!, {r4-r7, pc}
+
+ tst r2, #32
+ stmneia r0!, {r1, r3-r7, ip, lr}
+ tst r2, #16
+ stmneia r0!, {r4-r7}
+ ldmfd sp!, {r4-r7, lr}
+
+#endif
+
+4: tst r2, #8
+ stmneia r0!, {r1, r3}
+ tst r2, #4
+ strne r1, [r0], #4
+/*
+ * When we get here, we've got less than 4 bytes to set. We
+ * may have an unaligned pointer as well.
+ */
+5: tst r2, #2
+ strneb r1, [r0], #1
+ strneb r1, [r0], #1
+ tst r2, #1
+ strneb r1, [r0], #1
+ mov pc, lr
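
For readers less used to pre-UAL ARM assembly, here is a minimal C sketch
of the strategy above (an illustration only, not the code this patch adds;
the name memset_sketch is made up): replicate the fill byte across a 32-bit
word, store single bytes until the destination is word-aligned, bulk-store
whole words, then finish the tail byte by byte. The assembly additionally
unrolls the word loop into 64-byte stm bursts and, when CALGN is active,
first aligns the destination to a 32-byte cache line.

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical C model of the algorithm above; not the actual
	 * libpayload implementation. */
	void *memset_sketch(void *dst, int c, size_t n)
	{
		unsigned char *p = dst;
		uint32_t pattern = (unsigned char)c;

		/* Replicate the fill byte into all four bytes of the word,
		 * as the two "orr ..., lsl" instructions do with r1. */
		pattern |= pattern << 8;
		pattern |= pattern << 16;

		/* Head: store bytes until p is word-aligned (label 1: above). */
		while (n > 0 && ((uintptr_t)p & 3) != 0) {
			*p++ = (unsigned char)c;
			n--;
		}

		/* Body: store a whole word at a time; the assembly unrolls
		 * this into 64-byte stm bursts. */
		while (n >= 4) {
			*(uint32_t *)(void *)p = pattern;
			p += 4;
			n -= 4;
		}

		/* Tail: at most three bytes remain (label 5: above). */
		while (n > 0) {
			*p++ = (unsigned char)c;
			n--;
		}

		return dst;
	}

The stm instructions store four (or, in the CALGN variant, eight) registers
per instruction, which is why the fill pattern is duplicated into r3, ip,
and lr rather than kept only in r1.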