author     David Hendricks <dhendrix@chromium.org>    2013-03-07 00:58:10 +0100
committer  Ronald G. Minnich <rminnich@gmail.com>     2013-03-07 06:45:43 +0100
commit     147cdc3b171c8f02434dc3b6bbd70b6406de93ee (patch)
tree       275029b14043e06dd2a26593b74f47b7eb31ab15
parent     d9b16f3b048243fd7d4c4513875f7f241261ce50 (diff)
Revert "ARMv7: Simplify div64"
This reverts commit 1cd616082100f47dc2d6d73669c6aa2e5eb039ad.

Division bites us again. I don't know how or why, but printk() seems
to break (again) with this patch. I'm surprised we didn't encounter
problems earlier on...

Change-Id: I81cb9f20879f5eb73a76e1af47b96a68d1e81dc8

TODO: Find a better solution for div64. This one is too painful, but
seems necessary for now (and sort-of works with our vtxprintf hack).

Reviewed-on: http://review.coreboot.org/2600
Tested-by: build bot (Jenkins)
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
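For context, the semantics being restored are those of the kernel-style
do_div(): it divides a 64-bit value in place by a 32-bit base and returns
the 32-bit remainder, which is exactly the pattern printk-style digit loops
depend on. Below is a minimal sketch of that usage in plain C, with a
portable stand-in for the macro; the format_u64() helper is hypothetical,
not coreboot's actual vtxprintf code:

    #include <stdint.h>

    /* Portable stand-in for the ARM do_div() macro restored below:
     * divide n in place by base and yield the 32-bit remainder.
     * (GCC statement-expression, as in the kernel original.) */
    #define do_div(n, base) ({              \
            uint32_t __rem = (n) % (base);  \
            (n) /= (base);                  \
            __rem;                          \
    })

    /* Hypothetical digit-emitting loop in the style of vtxprintf. */
    static int format_u64(char *buf, uint64_t n, unsigned int base)
    {
            static const char digits[] = "0123456789abcdef";
            char tmp[64];   /* 64 digits suffice even for base 2 */
            int i = 0, len = 0;

            do {    /* do_div() leaves n /= base, returns n % base */
                    tmp[i++] = digits[do_div(n, base)];
            } while (n != 0);

            while (i > 0)   /* digits emerge least-significant first */
                    buf[len++] = tmp[--i];
            buf[len] = '\0';
            return len;
    }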
-rw-r--r--  src/arch/armv7/include/div64.h   186
-rw-r--r--  src/arch/armv7/lib/Makefile.inc    2
-rw-r--r--  src/arch/armv7/lib/div64.S         9
3 files changed, 190 insertions(+), 7 deletions(-)
diff --git a/src/arch/armv7/include/div64.h b/src/arch/armv7/include/div64.h
index 9c3c29a63f..5baed2b294 100644
--- a/src/arch/armv7/include/div64.h
+++ b/src/arch/armv7/include/div64.h
@@ -3,7 +3,12 @@
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
+//#include <asm/system.h>
+//#include <linux/types.h>
+// FIXME
+
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __LINUX_ARM_ARCH__ 7
/*
* The semantics of do_div() are:
@@ -29,7 +34,7 @@
#define __xh "r1"
#endif
-#define do_div(n, base) \
+#define __do_div_asm(n, base) \
({ \
register unsigned int __base asm("r4") = base; \
register unsigned long long __n asm("r0") = n; \
@@ -46,4 +51,183 @@
n = __res; \
__rem; \
})
+
+#if __GNUC__ < 4
+
+/*
+ * gcc versions earlier than 4.0 are simply too problematic for the
+ * optimized implementation below. First, there is gcc PR 15089 that
+ * tends to trigger on more complex constructs; spurious .global
+ * __udivsi3 directives are inserted even if none of those symbols are
+ * referenced in the generated code; and those gcc versions are not
+ * able to do constant propagation on long long values anyway.
+ */
+#define do_div(n, base) __do_div_asm(n, base)
+
+#elif __GNUC__ >= 4
+
+//#include <asm/bug.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications instead which is much faster. And yet only if compiling
+ * for ARMv4 or higher (we need umull/umlal) and if the gcc version is
+ * sufficiently recent to perform proper long long constant propagation.
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+#define do_div(n, base) \
+({ \
+ unsigned int __r, __b = (base); \
+ if (!__builtin_constant_p(__b) || __b == 0 || \
+ (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \
+ /* non-constant divisor (or zero): slow path */ \
+ __r = __do_div_asm(n, __b); \
+ } else if ((__b & (__b - 1)) == 0) { \
+ /* Trivial: __b is constant and a power of 2 */ \
+ /* gcc does the right thing with this code. */ \
+ __r = n; \
+ __r &= (__b - 1); \
+ n /= __b; \
+ } else { \
+ /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \
+ /* We rely on the fact that most of this code gets */ \
+ /* optimized away at compile time due to constant */ \
+ /* propagation and only a couple inline assembly */ \
+ /* instructions should remain. Better avoid any */ \
+ /* code construct that might prevent that. */ \
+ unsigned long long __res, __x, __t, __m, __n = n; \
+ unsigned int __c, __p, __z = 0; \
+ /* preserve low part of n for remainder computation */ \
+ __r = __n; \
+ /* determine number of bits to represent __b */ \
+ __p = 1 << __div64_fls(__b); \
+ /* compute __m = ((__p << 64) + __b - 1) / __b */ \
+ __m = (~0ULL / __b) * __p; \
+ __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \
+ /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
+ __x = ~0ULL / __b * __b - 1; \
+ __res = (__m & 0xffffffff) * (__x & 0xffffffff); \
+ __res >>= 32; \
+ __res += (__m & 0xffffffff) * (__x >> 32); \
+ __t = __res; \
+ __res += (__x & 0xffffffff) * (__m >> 32); \
+ __t = (__res < __t) ? (1ULL << 32) : 0; \
+ __res = (__res >> 32) + __t; \
+ __res += (__m >> 32) * (__x >> 32); \
+ __res /= __p; \
+ /* Now sanitize and optimize what we've got. */ \
+ if (~0ULL % (__b / (__b & -__b)) == 0) { \
+ /* those cases can be simplified with: */ \
+ __n /= (__b & -__b); \
+ __m = ~0ULL / (__b / (__b & -__b)); \
+ __p = 1; \
+ __c = 1; \
+ } else if (__res != __x / __b) { \
+ /* We can't get away without a correction */ \
+ /* to compensate for bit truncation errors. */ \
+ /* To avoid it we'd need an additional bit */ \
+ /* to represent __m which would overflow it. */ \
+ /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \
+ __c = 1; \
+ /* Compute __m = (__p << 64) / __b */ \
+ __m = (~0ULL / __b) * __p; \
+ __m += ((~0ULL % __b + 1) * __p) / __b; \
+ } else { \
+ /* Reduce __m/__p, and try to clear bit 31 */ \
+ /* of __m when possible otherwise that'll */ \
+ /* need extra overflow handling later. */ \
+ unsigned int __bits = -(__m & -__m); \
+ __bits |= __m >> 32; \
+ __bits = (~__bits) << 1; \
+ /* If __bits == 0 then setting bit 31 is */ \
+ /* unavoidable. Simply apply the maximum */ \
+ /* possible reduction in that case. */ \
+ /* Otherwise the MSB of __bits indicates the */ \
+ /* best reduction we should apply. */ \
+ if (!__bits) { \
+ __p /= (__m & -__m); \
+ __m /= (__m & -__m); \
+ } else { \
+ __p >>= __div64_fls(__bits); \
+ __m >>= __div64_fls(__bits); \
+ } \
+ /* No correction needed. */ \
+ __c = 0; \
+ } \
+ /* Now we have a combination of 2 conditions: */ \
+ /* 1) whether or not we need a correction (__c), and */ \
+ /* 2) whether or not there might be an overflow in */ \
+ /* the cross product (__m & ((1<<63) | (1<<31))) */ \
+ /* Select the best insn combination to perform the */ \
+ /* actual __m * __n / (__p << 64) operation. */ \
+ if (!__c) { \
+ asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
+ "mov %Q0, #0" \
+ : "=&r" (__res) \
+ : "r" (__m), "r" (__n) \
+ : "cc" ); \
+ } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
+ __res = __m; \
+ asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \
+ "mov %Q0, #0" \
+ : "+&r" (__res) \
+ : "r" (__m), "r" (__n) \
+ : "cc" ); \
+ } else { \
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
+ "cmn %Q0, %Q1\n\t" \
+ "adcs %R0, %R0, %R1\n\t" \
+ "adc %Q0, %3, #0" \
+ : "=&r" (__res) \
+ : "r" (__m), "r" (__n), "r" (__z) \
+ : "cc" ); \
+ } \
+ if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
+ asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \
+ "umlal %R0, %Q0, %Q1, %R2\n\t" \
+ "mov %R0, #0\n\t" \
+ "umlal %Q0, %R0, %R1, %R2" \
+ : "+&r" (__res) \
+ : "r" (__m), "r" (__n) \
+ : "cc" ); \
+ } else { \
+ asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \
+ "umlal %R0, %1, %Q2, %R3\n\t" \
+ "mov %R0, #0\n\t" \
+ "adds %Q0, %1, %Q0\n\t" \
+ "adc %R0, %R0, #0\n\t" \
+ "umlal %Q0, %R0, %R2, %R3" \
+ : "+&r" (__res), "+&r" (__z) \
+ : "r" (__m), "r" (__n) \
+ : "cc" ); \
+ } \
+ __res /= __p; \
+ /* The remainder can be computed with 32-bit regs */ \
+ /* only, and gcc is good at that. */ \
+ { \
+ unsigned int __res0 = __res; \
+ unsigned int __b0 = __b; \
+ __r -= __res0 * __b0; \
+ } \
+ /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \
+ n = __res; \
+ } \
+ __r; \
+})
+
+/* our own fls implementation to make sure constant propagation is fine */
+#define __div64_fls(bits) \
+({ \
+ unsigned int __left = (bits), __nr = 0; \
+ if (__left & 0xffff0000) __nr += 16, __left >>= 16; \
+ if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \
+ if (__left & 0x000000f0) __nr += 4, __left >>= 4; \
+ if (__left & 0x0000000c) __nr += 2, __left >>= 2; \
+ if (__left & 0x00000002) __nr += 1; \
+ __nr; \
+})
+
+#endif
+
#endif
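The constant-divisor fast path added back above is the classic
multiply-by-reciprocal trick: for a constant divisor b, a multiplier
m ~= 2^(32+s)/b is precomputed at compile time so that n/b becomes a
umull plus a shift, with the correction branch handling divisors whose
reciprocal cannot be represented exactly. A 32-bit illustration of the
same idea in plain C (the div10()/mod10() names are illustrative; the
multiplier is the one gcc itself emits for division by 10):

    #include <assert.h>
    #include <stdint.h>

    /* n / 10 computed as a multiply plus shift:
     * m = ceil(2^35 / 10) = 0xCCCCCCCD, exact for every 32-bit n
     * (m * 10 - 2^35 = 2 <= 2^3, so no correction step is needed). */
    static uint32_t div10(uint32_t n)
    {
            return (uint64_t)n * 0xCCCCCCCDull >> 35;
    }

    /* Remainder recovered with 32-bit ops only, mirroring the
     * "__r -= __res0 * __b0" step at the end of the macro above. */
    static uint32_t mod10(uint32_t n)
    {
            return n - div10(n) * 10;
    }

    int main(void)
    {
            assert(div10(4294967295u) == 429496729u);  /* UINT32_MAX */
            assert(mod10(4294967295u) == 5u);
            return 0;
    }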
diff --git a/src/arch/armv7/lib/Makefile.inc b/src/arch/armv7/lib/Makefile.inc
index 0e81c997a0..388864aa28 100644
--- a/src/arch/armv7/lib/Makefile.inc
+++ b/src/arch/armv7/lib/Makefile.inc
@@ -2,8 +2,6 @@ bootblock-y += syslib.c
bootblock-$(CONFIG_EARLY_CONSOLE) += early_console.c
bootblock-y += cache_v7.c
bootblock-y += cache-cp15.c
-bootblock-y += div0.c
-bootblock-y += div64.S
romstage-y += cache_v7.c
romstage-y += cache-cp15.c
diff --git a/src/arch/armv7/lib/div64.S b/src/arch/armv7/lib/div64.S
index 41a094941e..44edf480ca 100644
--- a/src/arch/armv7/lib/div64.S
+++ b/src/arch/armv7/lib/div64.S
@@ -13,7 +13,8 @@
*/
// FIXME
-#define __ARM_ARCH__ 7
+//#include <linux/linkage.h>
+#define __LINUX_ARM_ARCH__ 7
#ifdef __ARMEB__
#define xh r0
@@ -64,7 +65,7 @@ __do_div64:
@ The aligned divisor is stored in yl preserving the original.
@ The bit position is stored in ip.
-#if __ARM_ARCH__ >= 5
+#if __LINUX_ARM_ARCH__ >= 5
clz yl, r4
clz ip, xh
@@ -124,7 +125,7 @@ __do_div64:
@ We still have remainder bits in the low part. Bring them up.
-#if __ARM_ARCH__ >= 5
+#if __LINUX_ARM_ARCH__ >= 5
clz xh, xl @ we know xh is zero here so...
add xh, xh, #1
@@ -151,7 +152,7 @@ __do_div64:
8: @ Division by a power of 2: determine what that divisor order is
@ then simply shift values around
-#if __ARM_ARCH__ >= 5
+#if __LINUX_ARM_ARCH__ >= 5
clz ip, r4
rsb ip, ip, #31
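The three hunks above all make the same substitution: where ARMv5 and
later guarantee clz, the divisor's bit order is computed in a single
instruction instead of a shift loop. A plain-C equivalent of that step,
assuming the GCC/Clang __builtin_clz builtin is available:

    #include <stdint.h>

    /* Order (log2) of a power-of-2 divisor, as the ARMv5+ path gets
     * via "clz ip, r4; rsb ip, ip, #31"; pre-v5 cores loop instead. */
    static unsigned int divisor_order(uint32_t d)
    {
            return 31 - __builtin_clz(d);   /* e.g. d = 8 -> 3 */
    }

    /* With the order known, division by a power of 2 reduces to a
     * shift and a mask, as in the label-8 hunk above. */
    static uint64_t div_pow2(uint64_t n, uint32_t d, uint32_t *rem)
    {
            *rem = (uint32_t)n & (d - 1);   /* low bits = remainder */
            return n >> divisor_order(d);
    }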