author | Stefan Reinauer <stepan@coresystems.de> | 2008-02-15 18:16:06 +0000
---|---|---
committer | Stefan Reinauer <stepan@openbios.org> | 2008-02-15 18:16:06 +0000
commit | b34eea348cb7d6d9c93d17d51a1f322114b8f15d | (patch)
tree | 38dd85932dacaa7c7a174d87307baff6210d70c4 | /util/mkelfImage/kunzip_src/arch/alpha/lib
parent | 46fc14dcc8cdba1d66ae6fa9fdcbbf33265d676e | (diff)
Importing mkelfimage from
ftp://ftp.lnxi.com/pub/mkelfImage/mkelfImage-2.7.tar.gz
Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3103 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'util/mkelfImage/kunzip_src/arch/alpha/lib')
-rw-r--r-- | util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile | 17
-rw-r--r-- | util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S | 195
-rw-r--r-- | util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds | 50
-rw-r--r-- | util/mkelfImage/kunzip_src/arch/alpha/lib/start.S | 89
4 files changed, 351 insertions, 0 deletions
diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile b/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile
new file mode 100644
index 0000000000..2fd15b06a0
--- /dev/null
+++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile
@@ -0,0 +1,17 @@
+ARCH_OPTIONS=
+
+OBJECTS += __divqu.o __remqu.o __divlu.o __remlu.o
+
+$(OBJDIR)/__divqu.o: $(SRC)/arch/alpha/lib/divide.S
+	$(CC) $(CFLAGS) -DDIV -c -o $@ $^
+
+$(OBJDIR)/__remqu.o: $(SRC)/arch/alpha/lib/divide.S
+	$(CC) $(CFLAGS) -DREM -c -o $@ $^
+
+$(OBJDIR)/__divlu.o: $(SRC)/arch/alpha/lib/divide.S
+	$(CC) $(CFLAGS) -DDIV -DINTSIZE -c -o $@ $^
+
+$(OBJDIR)/__remlu.o: $(SRC)/arch/alpha/lib/divide.S
+	$(CC) $(CFLAGS) -DREM -DINTSIZE -c -o $@ $^
+
+
diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S b/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S
new file mode 100644
index 0000000000..a4bc1f751e
--- /dev/null
+++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S
@@ -0,0 +1,195 @@
+/*
+ * cpu/ev6/divide.S
+ *
+ * (C) 1995 Linus Torvalds
+ *
+ * Alpha division..
+ */
+
+/*
+ * The alpha chip doesn't provide hardware division, so we have to do it
+ * by hand. The compiler expects the functions
+ *
+ *	__divqu: 64-bit unsigned long divide
+ *	__remqu: 64-bit unsigned long remainder
+ *	__divqs/__remqs: signed 64-bit
+ *	__divlu/__remlu: unsigned 32-bit
+ *	__divls/__remls: signed 32-bit
+ *
+ * These are not normal C functions: instead of the normal
+ * calling sequence, these expect their arguments in registers
+ * $24 and $25, and return the result in $27. Register $28 may
+ * be clobbered (assembly temporary), anything else must be saved.
+ *
+ * In short: painful.
+ *
+ * This is a rather simple bit-at-a-time algorithm: it's very good
+ * at dividing random 64-bit numbers, but the more usual case where
+ * the divisor is small is handled better by the DEC algorithm
+ * using lookup tables. This uses much less memory, though, and is
+ * nicer on the cache.. Besides, I don't know the copyright status
+ * of the DEC code.
+ */
+
+/*
+ * My temporaries:
+ *	$0 - current bit
+ *	$1 - shifted divisor
+ *	$2 - modulus/quotient
+ *
+ *	$23 - return address
+ *	$24 - dividend
+ *	$25 - divisor
+ *
+ *	$27 - quotient/modulus
+ *	$28 - compare status
+ */

+#define halt .long 0
+
+/*
+ * Select function type and registers
+ */
+#define mask	$0
+#define divisor	$1
+#define compare $28
+#define tmp1	$3
+#define tmp2	$4
+
+#ifdef DIV
+#define DIV_ONLY(x,y...) x,##y
+#define MOD_ONLY(x,y...)
+#define func(x) __div##x
+#define modulus $2
+#define quotient $27
+#define GETSIGN(x) xor $24,$25,x
+#define STACK 48
+#else
+#define DIV_ONLY(x,y...)
+#define MOD_ONLY(x,y...) x,##y
+#define func(x) __rem##x
+#define modulus $27
+#define quotient $2
+#define GETSIGN(x) bis $24,$24,x
+#define STACK 32
+#endif
+
+/*
+ * For 32-bit operations, we need to extend to 64-bit
+ */
+#ifdef INTSIZE
+#define ufunction func(lu)
+#define sfunction func(l)
+#define LONGIFY(x) zapnot x,15,x
+#define SLONGIFY(x) addl x,0,x
+#else
+#define ufunction func(qu)
+#define sfunction func(q)
+#define LONGIFY(x)
+#define SLONGIFY(x)
+#endif
+
+.set noat
+.align	3
+.globl	ufunction
+.ent	ufunction
+ufunction:
+	subq	$30,STACK,$30
+	.frame	$30,STACK,$23
+	.prologue 0
+
+7:	stq	$1, 0($30)
+	bis	$25,$25,divisor
+	stq	$2, 8($30)
+	bis	$24,$24,modulus
+	stq	$0,16($30)
+	bis	$31,$31,quotient
+	LONGIFY(divisor)
+	stq	tmp1,24($30)
+	LONGIFY(modulus)
+	bis	$31,1,mask
+	DIV_ONLY(stq tmp2,32($30))
+	beq	divisor, 9f			/* div by zero */
+
+#ifdef INTSIZE
+	/*
+	 * shift divisor left, using 3-bit shifts for
+	 * 32-bit divides as we can't overflow. Three-bit
+	 * shifts will result in looping three times less
+	 * here, but can result in two loops more later.
+	 * Thus using a large shift isn't worth it (and
+	 * s8add pairs better than a sll..)
+	 */
+1:	cmpult	divisor,modulus,compare
+	s8addq	divisor,$31,divisor
+	s8addq	mask,$31,mask
+	bne	compare,1b
+#else
+1:	cmpult	divisor,modulus,compare
+	blt	divisor, 2f
+	addq	divisor,divisor,divisor
+	addq	mask,mask,mask
+	bne	compare,1b
+	unop
+#endif
+
+	/* ok, start to go right again.. */
+2:	DIV_ONLY(addq quotient,mask,tmp2)
+	srl	mask,1,mask
+	cmpule	divisor,modulus,compare
+	subq	modulus,divisor,tmp1
+	DIV_ONLY(cmovne compare,tmp2,quotient)
+	srl	divisor,1,divisor
+	cmovne	compare,tmp1,modulus
+	bne	mask,2b
+
+9:	ldq	$1, 0($30)
+	ldq	$2, 8($30)
+	ldq	$0,16($30)
+	ldq	tmp1,24($30)
+	DIV_ONLY(ldq tmp2,32($30))
+	addq	$30,STACK,$30
+	ret	$31,($23),1
+	.end	ufunction
+
+/*
+ * Uhh.. Ugly signed division. I'd rather not have it at all, but
+ * it's needed in some circumstances. There are different ways to
+ * handle this, really. This does:
+ *	-a / b = a / -b = -(a / b)
+ *	-a % b = -(a % b)
+ *	a % -b = a % b
+ * which is probably not the best solution, but at least should
+ * have the property that (x/y)*y + (x%y) = x.
+ */
+.align 3
+.globl	sfunction
+.ent	sfunction
+sfunction:
+	subq	$30,STACK,$30
+	.frame	$30,STACK,$23
+	.prologue 0
+	bis	$24,$25,$28
+	SLONGIFY($28)
+	bge	$28,7b
+	stq	$24,0($30)
+	subq	$31,$24,$28
+	stq	$25,8($30)
+	cmovlt	$24,$28,$24	/* abs($24) */
+	stq	$23,16($30)
+	subq	$31,$25,$28
+	stq	tmp1,24($30)
+	cmovlt	$25,$28,$25	/* abs($25) */
+	unop
+	bsr	$23,ufunction
+	ldq	$24,0($30)
+	ldq	$25,8($30)
+	GETSIGN($28)
+	subq	$31,$27,tmp1
+	SLONGIFY($28)
+	ldq	$23,16($30)
+	cmovlt	$28,tmp1,$27
+	ldq	tmp1,24($30)
+	addq	$30,STACK,$30
+	ret	$31,($23),1
+	.end	sfunction
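The long comment at the top of divide.S describes the calling convention and a bit-at-a-time algorithm in prose. As a reading aid, here is a minimal C model of the loop ufunction implements; it is a sketch only — the name udivmod64 and the test values are invented for this note and are not part of the imported sources:

```c
#include <assert.h>
#include <stdint.h>

/* C model of ufunction's shift-and-subtract loop: scale the divisor
 * up until it passes the dividend, then walk it back down one bit at
 * a time, subtracting wherever it still fits. */
static void udivmod64(uint64_t dividend, uint64_t divisor,
                      uint64_t *quot, uint64_t *rem)
{
	uint64_t mask = 1, q = 0;

	if (divisor == 0) {	/* the asm takes `beq divisor, 9f` straight to the exit */
		*quot = 0;
		*rem = dividend;
		return;
	}
	/* scale up; stop before the top bit shifts out (the `blt divisor, 2f` guard) */
	while (divisor < dividend && !(divisor >> 63)) {
		divisor <<= 1;
		mask <<= 1;	/* mask tracks the quotient bit this position is worth */
	}
	/* scale down (the `2:` loop): subtract where the shifted divisor fits */
	while (mask) {
		if (divisor <= dividend) {	/* cmpule divisor,modulus,compare */
			dividend -= divisor;	/* subq modulus,divisor,tmp1 */
			q += mask;		/* addq quotient,mask (DIV builds only) */
		}
		divisor >>= 1;
		mask >>= 1;
	}
	*quot = q;		/* what a -DDIV build returns in $27 */
	*rem = dividend;	/* what a -DREM build returns in $27 */
}

int main(void)
{
	uint64_t q, r;
	udivmod64(1000, 7, &q, &r);
	assert(q == 142 && r == 6);
	return 0;
}
```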
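The comment above sfunction pins signed behaviour down with three identities. The same rules in self-contained C, using native 64-bit division to stand in for the ufunction call (sdiv64/srem64 are names made up here; the asserts exercise the promised property (x/y)*y + (x%y) == x):

```c
#include <assert.h>
#include <stdint.h>

/* Sign fixup as divide.S does it: operate on magnitudes, negate the
 * quotient iff the operand signs differ (GETSIGN is `xor $24,$25` in
 * DIV builds), and give the remainder the dividend's sign (GETSIGN
 * copies $24 in REM builds). */
static int64_t sdiv64(int64_t a, int64_t b)
{
	uint64_t ua = a < 0 ? 0 - (uint64_t)a : (uint64_t)a;
	uint64_t ub = b < 0 ? 0 - (uint64_t)b : (uint64_t)b;
	uint64_t q = ua / ub;	/* stands in for `bsr $23,ufunction` */
	return (a ^ b) < 0 ? -(int64_t)q : (int64_t)q;
}

static int64_t srem64(int64_t a, int64_t b)
{
	uint64_t ua = a < 0 ? 0 - (uint64_t)a : (uint64_t)a;
	uint64_t ub = b < 0 ? 0 - (uint64_t)b : (uint64_t)b;
	uint64_t r = ua % ub;
	return a < 0 ? -(int64_t)r : (int64_t)r;
}

int main(void)
{
	int64_t xs[] = { 7, -7, 7, -7, 100, -3 };
	int64_t ys[] = { 2, 2, -2, -2, -7, 100 };

	for (int i = 0; i < 6; i++)
		assert(sdiv64(xs[i], ys[i]) * ys[i] + srem64(xs[i], ys[i]) == xs[i]);
	return 0;
}
```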
diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds b/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds
new file mode 100644
index 0000000000..ed632686a8
--- /dev/null
+++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds
@@ -0,0 +1,50 @@
+PAGE_SIZE = 65536;
+BASIC_ALIGN = 8;
+OUTPUT_FORMAT("elf64-alpha")
+ENTRY(__start)
+SECTIONS
+{
+	. = PAGE_SIZE;
+	_start = .;
+	/*
+	 * First we place the code and read only data (typically const declared).
+	 * This get placed in rom.
+	 */
+	.text : {
+		_text = .;
+		*(.text)
+		_etext = .;
+		_rodata = .;
+		*(.rodata);
+		_erodata = .;
+	}
+	/* Global data */
+	.data : {
+		_data = .;
+		*(.data)
+		CONSTRUCTORS
+		*(.got)
+		*(.sdata)
+		_edata = .;
+	}
+
+	/* Important align _bss so bss may be zeroed with quadword access */
+	. = ALIGN(BASIC_ALIGN);
+	.bss : {
+		_bss = .;
+		*(.sbss)
+		*(.scommon)
+		*(.bss)
+		*(COMMON)
+		*(.heap)
+		*(.stack)
+		/* Important align _ebss so bss may be zeroed with quadword access */
+		. = ALIGN(BASIC_ALIGN);
+		_ebss = .;
+	}
+	_end = .;
+
+	/DISCARD/ : {
+		*(*)
+	}
+}
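kunzip.lds aligns both _bss and _ebss to BASIC_ALIGN (8 bytes) precisely so the region can be cleared one quadword at a time, as the two comments say. A hedged sketch of the loop that alignment permits, assuming the clearing is done from C (the real startup code may well do it in assembly):

```c
#include <stdint.h>

/* Symbols laid down by kunzip.lds; the ALIGN(BASIC_ALIGN) statements
 * guarantee both are 8-byte aligned, so whole-quadword stores never
 * straddle the region's edges. */
extern uint64_t _bss[], _ebss[];

static void zero_bss(void)
{
	/* one 64-bit store per iteration; no byte-wise tail is needed,
	 * because _ebss - _bss is a multiple of 8 */
	for (uint64_t *p = _bss; p < _ebss; p++)
		*p = 0;
}
```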
diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S b/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S
new file mode 100644
index 0000000000..a89f4ca6de
--- /dev/null
+++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S
@@ -0,0 +1,89 @@
+.set noat
+.set noreorder
+.text
+
+__original_registers:
+	.quad 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
+__entry:
+	.quad entry
+
+.globl __start
+__start:
+	br $27, __save_registers
+__save_registers:
+	lda $27, (__original_registers - __save_registers)($27)
+	stq $0, 0($27)
+	stq $1, 8($27)
+	stq $2, 16($27)
+	stq $3, 24($27)
+	stq $4, 32($27)
+	stq $5, 40($27)
+	stq $6, 48($27)
+	stq $7, 56($27)
+	stq $8, 64($27)
+	stq $9, 72($27)
+	stq $10, 80($27)
+	stq $11, 88($27)
+	stq $12, 96($27)
+	stq $13, 104($27)
+	stq $14, 112($27)
+	stq $15, 120($27)
+	stq $16, 128($27)
+	stq $17, 136($27)
+	stq $18, 144($27)
+	stq $19, 152($27)
+	stq $20, 160($27)
+	stq $21, 168($27)
+	stq $22, 176($27)
+	stq $23, 184($27)
+	stq $24, 192($27)
+	stq $25, 200($27)
+	stq $26, 208($27)
+	stq $28, 224($27)
+	stq $29, 232($27)
+	stq $30, 240($27)
+
+__normal_start:
+	ldgp $29, (__normal_start - __original_registers)($27)
+	lda $30, _estack
+	jsr $26, kunzip
+
+.globl jmp_to_program_entry
+jmp_to_program_entry:
+	br $27, __restore_registers
+__restore_registers:
+	lda $27,(__original_registers - __restore_registers)($27)
+	stq $16, (__entry - __original_registers)($27)
+	ldq $0, 0($27)
+	ldq $1, 8($27)
+	ldq $2, 16($27)
+	ldq $3, 24($27)
+	ldq $4, 32($27)
+	ldq $5, 40($27)
+	ldq $6, 48($27)
+	ldq $7, 56($27)
+	ldq $8, 64($27)
+	ldq $9, 72($27)
+	ldq $10, 80($27)
+	ldq $11, 88($27)
+	ldq $12, 96($27)
+	ldq $13, 104($27)
+	ldq $14, 112($27)
+	ldq $15, 120($27)
+	ldq $16, 128($27)
+	ldq $17, 136($27)
+	ldq $18, 144($27)
+	ldq $19, 152($27)
+	ldq $20, 160($27)
+	ldq $21, 168($27)
+	ldq $22, 176($27)
+	ldq $23, 184($27)
+	ldq $24, 192($27)
+	ldq $25, 200($27)
+	ldq $26, 208($27)
+	ldq $28, 224($27)
+	ldq $29, 232($27)
+	ldq $30, 240($27)
+	ldq $27, (__entry - __original_registers)($27)
+	jsr $31, ($27)
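start.S snapshots the firmware's register file into __original_registers, sets up $29 (gp) and a stack at _estack, and calls kunzip; jmp_to_program_entry takes the decompressed payload's entry point in $16 (the first argument register), restores the saved registers, and jumps to it. From C the contract looks roughly like the sketch below — only the jmp_to_program_entry signature follows from the assembly above; the shape of kunzip and how the entry point is found are assumptions:

```c
/* jmp_to_program_entry (start.S) stores its argument in __entry,
 * restores the original register file, and never returns. */
extern void jmp_to_program_entry(void *entry);

void kunzip(void)
{
	void *entry;

	/* ... decompress the payload and locate its ELF entry point ... */
	entry = 0;	/* placeholder; supplied by the real unzip step */

	jmp_to_program_entry(entry);
}
```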