summaryrefslogtreecommitdiff
path: root/src/arch/x86
diff options
context:
space:
mode:
authorArthur Heymans <arthur@aheymans.xyz>2023-10-25 08:22:49 +0200
committerMartin L Roth <gaumless@gmail.com>2023-11-01 15:47:03 +0000
commitfdf85614dcb6d9a0d2134f2d2c82ff85ea185b44 (patch)
treea3ec85f5e42ef489b24db37d88ee0df0ec441501 /src/arch/x86
parent8bd7d6c806d2a7122b524279302688db2f93ea98 (diff)
arch/x86/memcpy.c: Optimize code for 64bit
In 64-bit mode movsq is available, which moves memory in chunks of 8 bytes rather than 4 bytes. Linux uses the same code.

Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Change-Id: I65f178d2ed3aae54b0c1ce739c2b4af8738b9fcc
Reviewed-on: https://review.coreboot.org/c/coreboot/+/78646
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Reviewed-by: Martin Roth <martin.roth@amd.corp-partner.google.com>
Reviewed-by: Jérémy Compostella <jeremy.compostella@intel.com>
Reviewed-by: Eric Lai <ericllai@google.com>
Diffstat (limited to 'src/arch/x86')
-rw-r--r--src/arch/x86/memcpy.c13
1 file changed, 10 insertions, 3 deletions
diff --git a/src/arch/x86/memcpy.c b/src/arch/x86/memcpy.c
index 93002cdd27..d96a93cd93 100644
--- a/src/arch/x86/memcpy.c
+++ b/src/arch/x86/memcpy.c
@@ -14,19 +14,26 @@ void *memcpy(void *dest, const void *src, size_t n)
check_memory_region((unsigned long)dest, n, true, _RET_IP_);
#endif
- asm volatile(
#if ENV_X86_64
- "rep ; movsd\n\t"
+ asm volatile(
+ "rep ; movsq\n\t"
"mov %4,%%rcx\n\t"
+ "rep ; movsb\n\t"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
+ : "memory"
+ );
#else
+ asm volatile(
"rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
-#endif
"rep ; movsb\n\t"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
: "memory"
);
+#endif
+
return dest;
}