author     Gabe Black <gabeblack@google.com>        2013-07-07 14:08:30 -0700
committer  Ronald G. Minnich <rminnich@gmail.com>   2013-07-14 20:35:24 +0200
commit     ccdc005b015f4211d0fa0d45393c5e4104620c53 (patch)
tree       0dbd76678638644ed09800783eafb1229fc5777a /src/arch/x86
parent     545c0caac8ff5cc5c8cc80b1c4d6dce5a2e17032 (diff)
x86: Add and enable an arch version of memmove.
This is from memcpy_32.c in the Linux kernel. There was no copyright header in the original file either.

Change-Id: Ifd259cb8a87615dce79ed1e551cc4bacb0414b4f
Signed-off-by: Gabe Black <gabeblack@chromium.org>
Reviewed-on: http://review.coreboot.org/3762
Tested-by: build bot (Jenkins)
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
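For context, selecting HAVE_ARCH_MEMMOVE lets this hand-optimized routine take the place of a plain C fallback. A minimal sketch of such a fallback, assuming the usual forward/backward byte copy and using the made-up name generic_memmove (this is not code from the coreboot tree), looks like:

#include <stddef.h>

/* Illustration only: a generic byte-wise fallback of the kind that
 * HAVE_ARCH_MEMMOVE lets the optimized routine below replace. The name
 * generic_memmove and this code are not taken from the coreboot tree. */
void *generic_memmove(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d <= s) {
		/* dest starts at or below src: a front-to-back copy is safe. */
		while (n--)
			*d++ = *s++;
	} else {
		/* dest starts above src: copy back-to-front so overlapping
		 * source bytes are read before they are overwritten. */
		d += n;
		s += n;
		while (n--)
			*--d = *--s;
	}
	return dest;
}

The forward/backward split is the same idea the assembly below implements, only with unrolled 16-byte loops and rep movsl.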
Diffstat (limited to 'src/arch/x86')
-rw-r--r--  src/arch/x86/Kconfig            1
-rw-r--r--  src/arch/x86/lib/Makefile.inc   4
-rw-r--r--  src/arch/x86/lib/memmove.c    187
3 files changed, 192 insertions, 0 deletions
diff --git a/src/arch/x86/Kconfig b/src/arch/x86/Kconfig
index 287e2bcef0..581a0484f7 100644
--- a/src/arch/x86/Kconfig
+++ b/src/arch/x86/Kconfig
@@ -5,6 +5,7 @@ config X86_ARCH_OPTIONS
default y
select HAVE_ARCH_MEMSET
select HAVE_ARCH_MEMCPY
+ select HAVE_ARCH_MEMMOVE

config MARK_GRAPHICS_MEM_WRCOMB
bool "Mark graphics memory as write-combining."
diff --git a/src/arch/x86/lib/Makefile.inc b/src/arch/x86/lib/Makefile.inc
index 56e4171578..8588c7686e 100644
--- a/src/arch/x86/lib/Makefile.inc
+++ b/src/arch/x86/lib/Makefile.inc
@@ -6,6 +6,7 @@ ramstage-y += exception.c
ramstage-$(CONFIG_IOAPIC) += ioapic.c
ramstage-y += memset.c
ramstage-y += memcpy.c
+ramstage-y += memmove.c
ramstage-y += ebda.c
ramstage-y += rom_media.c
ramstage-$(CONFIG_COOP_MULTITASKING) += thread.c
@@ -15,13 +16,16 @@ romstage-$(CONFIG_EARLY_CONSOLE) += romstage_console.c
romstage-y += cbfs_and_run.c
romstage-y += memset.c
romstage-y += memcpy.c
+romstage-y += memmove.c
romstage-y += rom_media.c

smm-y += memset.c
smm-y += memcpy.c
+smm-y += memmove.c
smm-y += rom_media.c

rmodules-y += memset.c
rmodules-y += memcpy.c
+rmodules-y += memmove.c

$(obj)/arch/x86/lib/console.ramstage.o :: $(obj)/build.h
diff --git a/src/arch/x86/lib/memmove.c b/src/arch/x86/lib/memmove.c
new file mode 100644
index 0000000000..ba121278da
--- /dev/null
+++ b/src/arch/x86/lib/memmove.c
@@ -0,0 +1,187 @@
+#include <string.h>
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+ int d0,d1,d2,d3,d4,d5;
+ char *ret = dest;
+
+ __asm__ __volatile__(
+ /* Sizes of 16 bytes or more are handled by the loops below */
+ "cmp $0x10, %0\n\t"
+ "jb 1f\n\t"
+
+ /* Decide forward/backward copy mode */
+ "cmp %2, %1\n\t"
+ "jb 2f\n\t"
+
+ /*
+ * The movs instruction has a high startup latency,
+ * so small sizes are handled with general-purpose registers.
+ */
+ "cmp $680, %0\n\t"
+ "jb 3f\n\t"
+ /*
+ * The movs instruction is only worthwhile in the aligned case.
+ */
+ "mov %1, %3\n\t"
+ "xor %2, %3\n\t"
+ "and $0xff, %3\n\t"
+ "jz 4f\n\t"
+ "3:\n\t"
+ "sub $0x10, %0\n\t"
+
+ /*
+ * Copy 16 bytes forward in each loop iteration.
+ */
+ "3:\n\t"
+ "sub $0x10, %0\n\t"
+ "mov 0*4(%1), %3\n\t"
+ "mov 1*4(%1), %4\n\t"
+ "mov %3, 0*4(%2)\n\t"
+ "mov %4, 1*4(%2)\n\t"
+ "mov 2*4(%1), %3\n\t"
+ "mov 3*4(%1), %4\n\t"
+ "mov %3, 2*4(%2)\n\t"
+ "mov %4, 3*4(%2)\n\t"
+ "lea 0x10(%1), %1\n\t"
+ "lea 0x10(%2), %2\n\t"
+ "jae 3b\n\t"
+ "add $0x10, %0\n\t"
+ "jmp 1f\n\t"
+
+ /*
+ * Copy data forward with movs.
+ */
+ ".p2align 4\n\t"
+ "4:\n\t"
+ "mov -4(%1, %0), %3\n\t"
+ "lea -4(%2, %0), %4\n\t"
+ "shr $2, %0\n\t"
+ "rep movsl\n\t"
+ "mov %3, (%4)\n\t"
+ "jmp 11f\n\t"
+ /*
+ * Copy data backward with movs.
+ */
+ ".p2align 4\n\t"
+ "6:\n\t"
+ "mov (%1), %3\n\t"
+ "mov %2, %4\n\t"
+ "lea -4(%1, %0), %1\n\t"
+ "lea -4(%2, %0), %2\n\t"
+ "shr $2, %0\n\t"
+ "std\n\t"
+ "rep movsl\n\t"
+ "mov %3,(%4)\n\t"
+ "cld\n\t"
+ "jmp 11f\n\t"
+
+ /*
+ * Start to prepare for backward copy.
+ */
+ ".p2align 4\n\t"
+ "2:\n\t"
+ "cmp $680, %0\n\t"
+ "jb 5f\n\t"
+ "mov %1, %3\n\t"
+ "xor %2, %3\n\t"
+ "and $0xff, %3\n\t"
+ "jz 6b\n\t"
+
+ /*
+ * Point src and dest at the tail of the buffers.
+ */
+ "5:\n\t"
+ "add %0, %1\n\t"
+ "add %0, %2\n\t"
+ "sub $0x10, %0\n\t"
+
+ /*
+ * Copy 16 bytes backward in each loop iteration.
+ */
+ "7:\n\t"
+ "sub $0x10, %0\n\t"
+
+ "mov -1*4(%1), %3\n\t"
+ "mov -2*4(%1), %4\n\t"
+ "mov %3, -1*4(%2)\n\t"
+ "mov %4, -2*4(%2)\n\t"
+ "mov -3*4(%1), %3\n\t"
+ "mov -4*4(%1), %4\n\t"
+ "mov %3, -3*4(%2)\n\t"
+ "mov %4, -4*4(%2)\n\t"
+ "lea -0x10(%1), %1\n\t"
+ "lea -0x10(%2), %2\n\t"
+ "jae 7b\n\t"
+ /*
+ * Move src and dest back to the head for the remaining bytes.
+ */
+ "add $0x10, %0\n\t"
+ "sub %0, %1\n\t"
+ "sub %0, %2\n\t"
+
+ /*
+ * Move 8 to 15 bytes of data.
+ */
+ ".p2align 4\n\t"
+ "1:\n\t"
+ "cmp $8, %0\n\t"
+ "jb 8f\n\t"
+ "mov 0*4(%1), %3\n\t"
+ "mov 1*4(%1), %4\n\t"
+ "mov -2*4(%1, %0), %5\n\t"
+ "mov -1*4(%1, %0), %1\n\t"
+
+ "mov %3, 0*4(%2)\n\t"
+ "mov %4, 1*4(%2)\n\t"
+ "mov %5, -2*4(%2, %0)\n\t"
+ "mov %1, -1*4(%2, %0)\n\t"
+ "jmp 11f\n\t"
+
+ /*
+ * Move 4 to 7 bytes of data.
+ */
+ ".p2align 4\n\t"
+ "8:\n\t"
+ "cmp $4, %0\n\t"
+ "jb 9f\n\t"
+ "mov 0*4(%1), %3\n\t"
+ "mov -1*4(%1, %0), %4\n\t"
+ "mov %3, 0*4(%2)\n\t"
+ "mov %4, -1*4(%2, %0)\n\t"
+ "jmp 11f\n\t"
+
+ /*
+ * Move 2 to 3 bytes of data.
+ */
+ ".p2align 4\n\t"
+ "9:\n\t"
+ "cmp $2, %0\n\t"
+ "jb 10f\n\t"
+ "movw 0*2(%1), %%dx\n\t"
+ "movw -1*2(%1, %0), %%bx\n\t"
+ "movw %%dx, 0*2(%2)\n\t"
+ "movw %%bx, -1*2(%2, %0)\n\t"
+ "jmp 11f\n\t"
+
+ /*
+ * Move the final byte, if any.
+ */
+ ".p2align 4\n\t"
+ "10:\n\t"
+ "cmp $1, %0\n\t"
+ "jb 11f\n\t"
+ "movb (%1), %%cl\n\t"
+ "movb %%cl, (%2)\n\t"
+ ".p2align 4\n\t"
+ "11:"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2),
+ "=r" (d3),"=r" (d4), "=r"(d5)
+ :"0" (n),
+ "1" (src),
+ "2" (dest)
+ :"memory");
+
+ return ret;
+
+}
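As a sanity check on the contract the routine above must honor, a small hypothetical test (not part of this commit) that exercises both overlap directions could look like:

#include <assert.h>
#include <string.h>

/* Hypothetical standalone test, not part of this commit: memmove must give
 * correct results when source and destination overlap in either direction,
 * which is what the forward and backward paths above implement. */
int main(void)
{
	char buf[16] = "abcdefghij";

	/* Overlap toward higher addresses: forces the backward copy path. */
	memmove(buf + 2, buf, 8);
	assert(memcmp(buf, "ababcdefgh", 10) == 0);

	/* Reset, then overlap toward lower addresses: the forward copy path. */
	memcpy(buf, "abcdefghij", 11);
	memmove(buf, buf + 2, 8);
	assert(memcmp(buf, "cdefghij", 8) == 0);

	return 0;
}

The first memmove copies toward higher addresses over an overlapping region and so must take the backward (std/rep movsl) path; the second copies toward lower addresses and takes the forward path.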