author | Benjamin Barenblat <bbaren@google.com> | 2016-06-17 09:49:24 -0700
committer | Martin Roth <martinroth@google.com> | 2016-06-24 19:10:05 +0200
commit | 82ef8ada82bd63ea7ce61843189fd4ee5de45cb5
tree | d6ff6b96487b2b56f2efa84c51deff299925dc20 /src/commonlib/lz4_wrapper.c
parent | c86da67436827c25919a2f5966049485a58fc984
src/commonlib/lz4_wrapper: Correct inline asm for unaligned 64-bit copy
Rewrite inline assembly for ARMv7+ to correctly annotate inputs and
outputs. On ARM GCC 6.1.1, this causes assembly output to change from
the incorrect

    @ r0 is allocated to hold dst and x0
    @ r1 is allocated to hold src and x1
    ldr r0, [r1]        @ clobbers dst!
    ldr r1, [r1, #4]
    str r0, [r0]
    str r1, [r0, #4]

to the correct

    @ r0 is allocated to hold dst
    @ r1 is allocated to hold src and x1
    @ r3 is allocated to hold x0
    ldr r3, [r1]
    ldr r1, [r1, #4]
    str r3, [r0]
    str r1, [r0, #4]
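Why the original single asm block miscompiled: GCC assumes an asm statement consumes all of its inputs before it writes any of its outputs, so the register allocator may place an output in the same register as an input unless the output is marked earlyclobber ("&"). Here the first ldr writes %[x0] before the first str reads %[dst], so giving both operands r0 was legal under the old constraints. The commit fixes this by splitting the block into one asm statement per instruction with explicit memory operands; the sketch below is hypothetical (not from the commit), contrasting the buggy pattern with an earlyclobber variant that would also have prevented the overlap. The function names are invented, and it assumes an ARMv7 target with GCC extended asm.

```c
#include <stdint.h>

/* Buggy pattern (hypothetical reconstruction): GCC may allocate the
 * output x0 and the input dst to the same register, because nothing
 * tells it that x0 is written before dst is last read. The first ldr
 * then clobbers dst before the first str uses it. */
static inline void copy8_buggy(void *dst, const void *src)
{
	uint32_t x0, x1;
	__asm__ volatile (
		"ldr %[x0], [%[src]]\n\t"
		"ldr %[x1], [%[src], #4]\n\t"
		"str %[x0], [%[dst]]\n\t"
		"str %[x1], [%[dst], #4]\n\t"
		: [x0] "=r" (x0), [x1] "=r" (x1)
		: [src] "r" (src), [dst] "r" (dst)
		: "memory");
}

/* Alternative fix the commit did not take: "=&r" (earlyclobber)
 * forbids the allocator from overlapping x0/x1 with any input
 * register, so dst survives until the stores. */
static inline void copy8_earlyclobber(void *dst, const void *src)
{
	uint32_t x0, x1;
	__asm__ volatile (
		"ldr %[x0], [%[src]]\n\t"
		"ldr %[x1], [%[src], #4]\n\t"
		"str %[x0], [%[dst]]\n\t"
		"str %[x1], [%[dst], #4]\n\t"
		: [x0] "=&r" (x0), [x1] "=&r" (x1)
		: [src] "r" (src), [dst] "r" (dst)
		: "memory");
}
```

Either remedy prevents the overlap; the per-instruction split the commit adopts has the extra benefit of telling GCC exactly which words of memory each instruction reads or writes, which is why the new code can also drop the blanket "memory" clobber.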
Also modify checkpatch.pl to ignore spaces before opening brackets when
used in inline assembly.
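For illustration, the pattern at issue looks like the following hypothetical snippet (not from the commit). In GCC extended asm, a bracketed symbolic operand name follows a ':' or ',', so a space legitimately precedes the opening bracket; checkpatch's generic space-before-open-square-bracket check would otherwise flag it.

```c
#include <stdint.h>

/* Hypothetical example: the spaces before '[out]' and '[in]' are
 * normal extended-asm style, not a style error. Assumes an ARM
 * target. */
static inline uint32_t add_one(uint32_t value)
{
	uint32_t result;
	__asm__ ("add %[out], %[in], #1"
		: [out] "=r" (result)
		: [in] "r" (value));
	return result;
}
```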
Change-Id: I255995f5e0a7b1a95375258755a93972c51d79b8
Signed-off-by: Benjamin Barenblat <bbaren@google.com>
Reviewed-on: https://review.coreboot.org/15216
Tested-by: build bot (Jenkins)
Reviewed-by: Julius Werner <jwerner@chromium.org>
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Diffstat (limited to 'src/commonlib/lz4_wrapper.c')
-rw-r--r-- | src/commonlib/lz4_wrapper.c | 22
1 file changed, 13 insertions, 9 deletions
    diff --git a/src/commonlib/lz4_wrapper.c b/src/commonlib/lz4_wrapper.c
    index 93fa7e8e1d..0342868d74 100644
    --- a/src/commonlib/lz4_wrapper.c
    +++ b/src/commonlib/lz4_wrapper.c
    @@ -1,5 +1,5 @@
     /*
    - * Copyright 2015 Google Inc.
    + * Copyright 2015-2016 Google Inc.
      *
      * Redistribution and use in source and binary forms, with or without
      * modification, are permitted provided that the following conditions
    @@ -54,14 +54,18 @@ static void LZ4_copy8(void *dst, const void *src)
                    ((uint8_t *)dst)[i] = ((uint8_t *)src)[i];
     #else
            uint32_t x0, x1;
    -       __asm__ volatile (
    -               "ldr %[x0], [%[src]]\n\t"
    -               "ldr %[x1], [%[src], #4]\n\t"
    -               "str %[x0], [%[dst]]\n\t"
    -               "str %[x1], [%[dst], #4]\n\t"
    -               : [x0]"=r"(x0), [x1]"=r"(x1)
    -               : [src]"r"(src), [dst]"r"(dst)
    -               : "memory" );
    +       __asm__ ("ldr %[x0], [%[src]]"
    +               : [x0]"=r"(x0)
    +               : [src]"r"(src), "m"(*(const uint32_t *)src));
    +       __asm__ ("ldr %[x1], [%[src], #4]"
    +               : [x1]"=r"(x1)
    +               : [src]"r"(src), "m"(*(const uint32_t *)(src + 4)));
    +       __asm__ ("str %[x0], [%[dst]]"
    +               : "=m"(*(uint32_t *)dst)
    +               : [x0]"r"(x0), [dst]"r"(dst));
    +       __asm__ ("str %[x1], [%[dst], #4]"
    +               : "=m"(*(uint32_t *)(dst + 4))
    +               : [x1]"r"(x1), [dst]"r"(dst));
     #endif
     #elif defined(__riscv__)
            /* RISC-V implementations may trap on any unaligned access. */
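A simple way to gain confidence in a routine like LZ4_copy8 is to copy across every source/destination misalignment and check the result. The harness below is a hypothetical sketch, not part of the commit; since LZ4_copy8 is static to lz4_wrapper.c, it substitutes the file's portable byte-loop fallback so it runs on any architecture.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable stand-in for LZ4_copy8's byte-loop fallback; the real
 * function is static to lz4_wrapper.c. */
static void copy8(void *dst, const void *src)
{
	for (int i = 0; i < 8; i++)
		((uint8_t *)dst)[i] = ((const uint8_t *)src)[i];
}

int main(void)
{
	uint8_t src[16], dst[16];

	for (unsigned i = 0; i < 16; i++)
		src[i] = (uint8_t)(i + 1);

	/* Exercise every src/dst misalignment within the buffers. */
	for (unsigned s = 0; s < 8; s++) {
		for (unsigned d = 0; d < 8; d++) {
			memset(dst, 0, sizeof(dst));
			copy8(dst + d, src + s);
			for (unsigned i = 0; i < 16; i++) {
				/* Bytes inside [d, d+8) must match src;
				 * everything else must stay zero. */
				uint8_t want = (i >= d && i < d + 8)
					? src[s + (i - d)] : 0;
				if (dst[i] != want) {
					printf("FAIL s=%u d=%u i=%u\n",
					       s, d, i);
					return 1;
				}
			}
		}
	}
	puts("OK");
	return 0;
}
```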