From b34eea348cb7d6d9c93d17d51a1f322114b8f15d Mon Sep 17 00:00:00 2001 From: Stefan Reinauer Date: Fri, 15 Feb 2008 18:16:06 +0000 Subject: Importing mkelfimage from ftp://ftp.lnxi.com/pub/mkelfImage/mkelfImage-2.7.tar.gz Signed-off-by: Stefan Reinauer Acked-by: Stefan Reinauer git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3103 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1 --- .../kunzip_src/arch/alpha/include/stddef.h | 15 + .../kunzip_src/arch/alpha/include/stdint.h | 53 + .../kunzip_src/arch/alpha/include/va-alpha.h | 128 +++ util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile | 17 + util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S | 195 ++++ .../kunzip_src/arch/alpha/lib/kunzip.lds | 50 + util/mkelfImage/kunzip_src/arch/alpha/lib/start.S | 89 ++ .../kunzip_src/arch/i386/include/stddef.h | 15 + .../kunzip_src/arch/i386/include/stdint.h | 52 + util/mkelfImage/kunzip_src/arch/i386/lib/Makefile | 1 + .../mkelfImage/kunzip_src/arch/i386/lib/kunzip.lds | 50 + util/mkelfImage/kunzip_src/arch/i386/lib/start.S | 36 + util/mkelfImage/kunzip_src/include/stdarg.h | 205 ++++ util/mkelfImage/kunzip_src/include/stdlib.h | 14 + util/mkelfImage/kunzip_src/include/string.h | 31 + util/mkelfImage/kunzip_src/include/types.h | 18 + util/mkelfImage/kunzip_src/lib/inflate.c | 1178 ++++++++++++++++++++ util/mkelfImage/kunzip_src/lib/kunzip.c | 170 +++ util/mkelfImage/kunzip_src/lib/malloc.c | 42 + util/mkelfImage/kunzip_src/lib/memcmp.c | 17 + util/mkelfImage/kunzip_src/lib/memcpy.c | 11 + util/mkelfImage/kunzip_src/lib/memset.c | 12 + 22 files changed, 2399 insertions(+) create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/include/stddef.h create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/include/stdint.h create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/include/va-alpha.h create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds create mode 100644 util/mkelfImage/kunzip_src/arch/alpha/lib/start.S create mode 100644 util/mkelfImage/kunzip_src/arch/i386/include/stddef.h create mode 100644 util/mkelfImage/kunzip_src/arch/i386/include/stdint.h create mode 100644 util/mkelfImage/kunzip_src/arch/i386/lib/Makefile create mode 100644 util/mkelfImage/kunzip_src/arch/i386/lib/kunzip.lds create mode 100644 util/mkelfImage/kunzip_src/arch/i386/lib/start.S create mode 100644 util/mkelfImage/kunzip_src/include/stdarg.h create mode 100644 util/mkelfImage/kunzip_src/include/stdlib.h create mode 100644 util/mkelfImage/kunzip_src/include/string.h create mode 100644 util/mkelfImage/kunzip_src/include/types.h create mode 100644 util/mkelfImage/kunzip_src/lib/inflate.c create mode 100644 util/mkelfImage/kunzip_src/lib/kunzip.c create mode 100644 util/mkelfImage/kunzip_src/lib/malloc.c create mode 100644 util/mkelfImage/kunzip_src/lib/memcmp.c create mode 100644 util/mkelfImage/kunzip_src/lib/memcpy.c create mode 100644 util/mkelfImage/kunzip_src/lib/memset.c (limited to 'util/mkelfImage/kunzip_src') diff --git a/util/mkelfImage/kunzip_src/arch/alpha/include/stddef.h b/util/mkelfImage/kunzip_src/arch/alpha/include/stddef.h new file mode 100644 index 0000000000..37ee13858a --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/include/stddef.h @@ -0,0 +1,15 @@ +#ifndef ALPHA_STDDEF_H +#define ALPHA_STDDEF_H + +typedef long ptrdiff_t; +typedef unsigned long size_t; +typedef long ssize_t; + +typedef int wchar_t; +typedef unsigned int wint_t; + +#define NULL 0 + 
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + +#endif /* ALPHA_STDDEF_H */ diff --git a/util/mkelfImage/kunzip_src/arch/alpha/include/stdint.h b/util/mkelfImage/kunzip_src/arch/alpha/include/stdint.h new file mode 100644 index 0000000000..9b93a0c84f --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/include/stdint.h @@ -0,0 +1,53 @@ +#ifndef ALPHA_STDINT_H +#define ALPHA_STDINT_H + +/* Exact integral types */ +typedef unsigned char uint8_t; +typedef signed char int8_t; + +typedef unsigned short uint16_t; +typedef signed short int16_t; + +typedef unsigned int uint32_t; +typedef signed int int32_t; + +typedef unsigned long uint64_t; +typedef signed long int64_t; + + +/* Small types */ +typedef unsigned char uint_least8_t; +typedef signed char int_least8_t; + +typedef unsigned short uint_least16_t; +typedef signed short int_least16_t; + +typedef unsigned int uint_least32_t; +typedef signed int int_least32_t; + +typedef unsigned long uint_least64_t; +typedef signed long int_least64_t; + +/* Fast Types */ +typedef unsigned char uint_fast8_t; +typedef signed char int_fast8_t; + +typedef unsigned long uint_fast16_t; +typedef signed long int_fast16_t; + +typedef unsigned long uint_fast32_t; +typedef signed long int_fast32_t; + +typedef unsigned long uint_fast64_t; +typedef signed long int_fast64_t; + +/* Types for `void *' pointers. */ +typedef long intptr_t; +typedef unsigned long uintptr_t; + +/* Largest integral types */ +typedef long intmax_t; +typedef unsigned long uintmax_t; + + +#endif /* ALPHA_STDINT_H */ diff --git a/util/mkelfImage/kunzip_src/arch/alpha/include/va-alpha.h b/util/mkelfImage/kunzip_src/arch/alpha/include/va-alpha.h new file mode 100644 index 0000000000..2528a712ad --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/include/va-alpha.h @@ -0,0 +1,128 @@ +/* GNU C varargs and stdargs support for the DEC Alpha. */ + +/* Note: We must use the name __builtin_savregs. GCC attaches special + significance to that name. In particular, regardless of where in a + function __builtin_saveregs is called, GCC moves the call up to the + very start of the function. */ + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST + +/* In VMS, __gnuc_va_list is simply char *; on OSF, it's a structure. */ + +#ifdef __VMS__ +typedef char *__gnuc_va_list; +#else + +typedef struct { + char *__base; /* Pointer to first integer register. */ + int __offset; /* Byte offset of args so far. */ +} __gnuc_va_list; +#endif + +#endif /* __GNUC_VA_LIST */ + +/* If this is for internal libc use, don't define anything but + __gnuc_va_list. */ + +#if !defined(__GNUC_VA_LIST_1) && (defined (_STDARG_H) || defined (_VARARGS_H)) +#define __GNUC_VA_LIST_1 + +#define _VA_LIST +#define _VA_LIST_ + +typedef __gnuc_va_list va_list; + +#if !defined(_STDARG_H) + +/* varargs support */ +#define va_alist __builtin_va_alist +#define va_dcl int __builtin_va_alist;... +#ifdef __VMS__ +#define va_start(pvar) ((pvar) = __builtin_saveregs ()) +#else +#define va_start(pvar) ((pvar) = * (__gnuc_va_list *) __builtin_saveregs ()) +#endif + +#else /* STDARG.H */ + +/* ANSI alternative. */ + +/* Call __builtin_next_arg even though we aren't using its value, so that + we can verify that firstarg is correct. 
*/ + +#ifdef __VMS__ +#define va_start(pvar, firstarg) \ + (__builtin_next_arg (firstarg), \ + (pvar) = __builtin_saveregs ()) +#else +#define va_start(pvar, firstarg) \ + (__builtin_next_arg (firstarg), \ + (pvar) = *(__gnuc_va_list *) __builtin_saveregs ()) +#endif + +#endif /* _STDARG_H */ + +#define va_end(__va) ((void) 0) + +/* Values returned by __builtin_classify_type. */ + +enum { + __no_type_class = -1, + __void_type_class, + __integer_type_class, + __char_type_class, + __enumeral_type_class, + __boolean_type_class, + __pointer_type_class, + __reference_type_class, + __offset_type_class, + __real_type_class, + __complex_type_class, + __function_type_class, + __method_type_class, + __record_type_class, + __union_type_class, + __array_type_class, + __string_type_class, + __set_type_class, + __file_type_class, + __lang_type_class +}; + +/* Note that parameters are always aligned at least to a word boundary + (when passed) regardless of what GCC's __alignof__ operator says. */ + +/* Avoid errors if compiling GCC v2 with GCC v1. */ +#if __GNUC__ == 1 +#define __extension__ +#endif + +/* Get the size of a type in bytes, rounded up to an integral number + of words. */ + +#define __va_tsize(__type) \ + (((sizeof (__type) + __extension__ sizeof (long long) - 1) \ + / __extension__ sizeof (long long)) * __extension__ sizeof (long long)) + +#ifdef __VMS__ +#define va_arg(__va, __type) \ +(*(((__va) += __va_tsize (__type)), \ + (__type *)(void *)((__va) - __va_tsize (__type)))) + +#else + +#define va_arg(__va, __type) \ +(*(((__va).__offset += __va_tsize (__type)), \ + (__type *)(void *)((__va).__base + (__va).__offset \ + - (((__builtin_classify_type (* (__type *) 0) \ + == __real_type_class) && (__va).__offset <= (6 * 8)) \ + ? (6 * 8) + 8 : __va_tsize (__type))))) +#endif + +/* Copy __gnuc_va_list into another variable of this type. */ +#define __va_copy(dest, src) (dest) = (src) + +#endif /* __GNUC_VA_LIST_1 */ diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile b/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile new file mode 100644 index 0000000000..2fd15b06a0 --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/Makefile @@ -0,0 +1,17 @@ +ARCH_OPTIONS= + +OBJECTS += __divqu.o __remqu.o __divlu.o __remlu.o + +$(OBJDIR)/__divqu.o: $(SRC)/arch/alpha/lib/divide.S + $(CC) $(CFLAGS) -DDIV -c -o $@ $^ + +$(OBJDIR)/__remqu.o: $(SRC)/arch/alpha/lib/divide.S + $(CC) $(CFLAGS) -DREM -c -o $@ $^ + +$(OBJDIR)/__divlu.o: $(SRC)/arch/alpha/lib/divide.S + $(CC) $(CFLAGS) -DDIV -DINTSIZE -c -o $@ $^ + +$(OBJDIR)/__remlu.o: $(SRC)/arch/alpha/lib/divide.S + $(CC) $(CFLAGS) -DREM -DINTSIZE -c -o $@ $^ + + diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S b/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S new file mode 100644 index 0000000000..a4bc1f751e --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/divide.S @@ -0,0 +1,195 @@ +/* + * cpu/ev6/divide.S + * + * (C) 1995 Linus Torvalds + * + * Alpha division.. + */ + +/* + * The alpha chip doesn't provide hardware division, so we have to do it + * by hand. The compiler expects the functions + * + * __divqu: 64-bit unsigned long divide + * __remqu: 64-bit unsigned long remainder + * __divqs/__remqs: signed 64-bit + * __divlu/__remlu: unsigned 32-bit + * __divls/__remls: signed 32-bit + * + * These are not normal C functions: instead of the normal + * calling sequence, these expect their arguments in registers + * $24 and $25, and return the result in $27. 
Register $28 may + * be clobbered (assembly temporary), anything else must be saved. + * + * In short: painful. + * + * This is a rather simple bit-at-a-time algorithm: it's very good + * at dividing random 64-bit numbers, but the more usual case where + * the divisor is small is handled better by the DEC algorithm + * using lookup tables. This uses much less memory, though, and is + * nicer on the cache.. Besides, I don't know the copyright status + * of the DEC code. + */ + +/* + * My temporaries: + * $0 - current bit + * $1 - shifted divisor + * $2 - modulus/quotient + * + * $23 - return address + * $24 - dividend + * $25 - divisor + * + * $27 - quotient/modulus + * $28 - compare status + */ + +#define halt .long 0 + +/* + * Select function type and registers + */ +#define mask $0 +#define divisor $1 +#define compare $28 +#define tmp1 $3 +#define tmp2 $4 + +#ifdef DIV +#define DIV_ONLY(x,y...) x,##y +#define MOD_ONLY(x,y...) +#define func(x) __div##x +#define modulus $2 +#define quotient $27 +#define GETSIGN(x) xor $24,$25,x +#define STACK 48 +#else +#define DIV_ONLY(x,y...) +#define MOD_ONLY(x,y...) x,##y +#define func(x) __rem##x +#define modulus $27 +#define quotient $2 +#define GETSIGN(x) bis $24,$24,x +#define STACK 32 +#endif + +/* + * For 32-bit operations, we need to extend to 64-bit + */ +#ifdef INTSIZE +#define ufunction func(lu) +#define sfunction func(l) +#define LONGIFY(x) zapnot x,15,x +#define SLONGIFY(x) addl x,0,x +#else +#define ufunction func(qu) +#define sfunction func(q) +#define LONGIFY(x) +#define SLONGIFY(x) +#endif + +.set noat +.align 3 +.globl ufunction +.ent ufunction +ufunction: + subq $30,STACK,$30 + .frame $30,STACK,$23 + .prologue 0 + +7: stq $1, 0($30) + bis $25,$25,divisor + stq $2, 8($30) + bis $24,$24,modulus + stq $0,16($30) + bis $31,$31,quotient + LONGIFY(divisor) + stq tmp1,24($30) + LONGIFY(modulus) + bis $31,1,mask + DIV_ONLY(stq tmp2,32($30)) + beq divisor, 9f /* div by zero */ + +#ifdef INTSIZE + /* + * shift divisor left, using 3-bit shifts for + * 32-bit divides as we can't overflow. Three-bit + * shifts will result in looping three times less + * here, but can result in two loops more later. + * Thus using a large shift isn't worth it (and + * s8add pairs better than a sll..) + */ +1: cmpult divisor,modulus,compare + s8addq divisor,$31,divisor + s8addq mask,$31,mask + bne compare,1b +#else +1: cmpult divisor,modulus,compare + blt divisor, 2f + addq divisor,divisor,divisor + addq mask,mask,mask + bne compare,1b + unop +#endif + + /* ok, start to go right again.. */ +2: DIV_ONLY(addq quotient,mask,tmp2) + srl mask,1,mask + cmpule divisor,modulus,compare + subq modulus,divisor,tmp1 + DIV_ONLY(cmovne compare,tmp2,quotient) + srl divisor,1,divisor + cmovne compare,tmp1,modulus + bne mask,2b + +9: ldq $1, 0($30) + ldq $2, 8($30) + ldq $0,16($30) + ldq tmp1,24($30) + DIV_ONLY(ldq tmp2,32($30)) + addq $30,STACK,$30 + ret $31,($23),1 + .end ufunction + +/* + * Uhh.. Ugly signed division. I'd rather not have it at all, but + * it's needed in some circumstances. There are different ways to + * handle this, really. This does: + * -a / b = a / -b = -(a / b) + * -a % b = -(a % b) + * a % -b = a % b + * which is probably not the best solution, but at least should + * have the property that (x/y)*y + (x%y) = x. 
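+ * (Quick check with x = -7, y = 2: -7/2 = -3 and -7%2 = -1, and
+ * (-3)*2 + (-1) = -7, as required.)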
+ */ +.align 3 +.globl sfunction +.ent sfunction +sfunction: + subq $30,STACK,$30 + .frame $30,STACK,$23 + .prologue 0 + bis $24,$25,$28 + SLONGIFY($28) + bge $28,7b + stq $24,0($30) + subq $31,$24,$28 + stq $25,8($30) + cmovlt $24,$28,$24 /* abs($24) */ + stq $23,16($30) + subq $31,$25,$28 + stq tmp1,24($30) + cmovlt $25,$28,$25 /* abs($25) */ + unop + bsr $23,ufunction + ldq $24,0($30) + ldq $25,8($30) + GETSIGN($28) + subq $31,$27,tmp1 + SLONGIFY($28) + ldq $23,16($30) + cmovlt $28,tmp1,$27 + ldq tmp1,24($30) + addq $30,STACK,$30 + ret $31,($23),1 + .end sfunction diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds b/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds new file mode 100644 index 0000000000..ed632686a8 --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/kunzip.lds @@ -0,0 +1,50 @@ +PAGE_SIZE = 65536; +BASIC_ALIGN = 8; +OUTPUT_FORMAT("elf64-alpha") +ENTRY(__start) +SECTIONS +{ + . = PAGE_SIZE; + _start = .; + /* + * First we place the code and read only data (typically const declared). + * This get placed in rom. + */ + .text : { + _text = .; + *(.text) + _etext = .; + _rodata = .; + *(.rodata); + _erodata = .; + } + /* Global data */ + .data : { + _data = .; + *(.data) + CONSTRUCTORS + *(.got) + *(.sdata) + _edata = .; + } + + /* Important align _bss so bss may be zeroed with quadword access */ + . = ALIGN(BASIC_ALIGN); + .bss : { + _bss = .; + *(.sbss) + *(.scommon) + *(.bss) + *(COMMON) + *(.heap) + *(.stack) + /* Important align _ebss so bss may be zeroed with quadword access */ + . = ALIGN(BASIC_ALIGN); + _ebss = .; + } + _end = .; + + /DISCARD/ : { + *(*) + } +} diff --git a/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S b/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S new file mode 100644 index 0000000000..a89f4ca6de --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/alpha/lib/start.S @@ -0,0 +1,89 @@ +.set noat +.set noreorder +.text + +__original_registers: + .quad 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + +__entry: + .quad entry + +.globl __start +__start: + br $27, __save_registers +__save_registers: + lda $27, (__original_registers - __save_registers)($27) + stq $0, 0($27) + stq $1, 8($27) + stq $2, 16($27) + stq $3, 24($27) + stq $4, 32($27) + stq $5, 40($27) + stq $6, 48($27) + stq $7, 56($27) + stq $8, 64($27) + stq $9, 72($27) + stq $10, 80($27) + stq $11, 88($27) + stq $12, 96($27) + stq $13, 104($27) + stq $14, 112($27) + stq $15, 120($27) + stq $16, 128($27) + stq $17, 136($27) + stq $18, 144($27) + stq $19, 152($27) + stq $20, 160($27) + stq $21, 168($27) + stq $22, 176($27) + stq $23, 184($27) + stq $24, 192($27) + stq $25, 200($27) + stq $26, 208($27) + stq $28, 224($27) + stq $29, 232($27) + stq $30, 240($27) + +__normal_start: + ldgp $29, (__normal_start - __original_registers)($27) + lda $30, _estack + jsr $26, kunzip + +.globl jmp_to_program_entry +jmp_to_program_entry: + br $27, __restore_registers +__restore_registers: + lda $27,(__original_registers - __restore_registers)($27) + stq $16, (__entry - __original_registers)($27) + ldq $0, 0($27) + ldq $1, 8($27) + ldq $2, 16($27) + ldq $3, 24($27) + ldq $4, 32($27) + ldq $5, 40($27) + ldq $6, 48($27) + ldq $7, 56($27) + ldq $8, 64($27) + ldq $9, 72($27) + ldq $10, 80($27) + ldq $11, 88($27) + ldq $12, 96($27) + ldq $13, 104($27) + ldq $14, 112($27) + ldq $15, 120($27) + ldq $16, 128($27) + ldq $17, 136($27) + ldq $18, 144($27) + ldq $19, 152($27) + ldq $20, 160($27) + ldq $21, 168($27) + ldq $22, 176($27) + ldq $23, 184($27) + ldq $24, 
192($27) + ldq $25, 200($27) + ldq $26, 208($27) + ldq $28, 224($27) + ldq $29, 232($27) + ldq $30, 240($27) + ldq $27, (__entry - __original_registers)($27) + jsr $31, ($27) diff --git a/util/mkelfImage/kunzip_src/arch/i386/include/stddef.h b/util/mkelfImage/kunzip_src/arch/i386/include/stddef.h new file mode 100644 index 0000000000..88a3b48957 --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/i386/include/stddef.h @@ -0,0 +1,15 @@ +#ifndef I386_STDDEF_H +#define I386_STDDEF_H + +typedef long ptrdiff_t; +typedef unsigned long size_t; +typedef long ssize_t; + +typedef int wchar_t; +typedef unsigned int wint_t; + +#define NULL 0 + +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + +#endif I386_STDDEF_H diff --git a/util/mkelfImage/kunzip_src/arch/i386/include/stdint.h b/util/mkelfImage/kunzip_src/arch/i386/include/stdint.h new file mode 100644 index 0000000000..58d7519cde --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/i386/include/stdint.h @@ -0,0 +1,52 @@ +#ifndef I386_STDINT_H +#define I386_STDINT_H + +/* Exact integral types */ +typedef unsigned char uint8_t; +typedef signed char int8_t; + +typedef unsigned short uint16_t; +typedef signed short int16_t; + +typedef unsigned int uint32_t; +typedef signed int int32_t; + +typedef unsigned long long uint64_t; +typedef signed long long int64_t; + +/* Small types */ +typedef unsigned char uint_least8_t; +typedef signed char int_least8_t; + +typedef unsigned short uint_least16_t; +typedef signed short int_least16_t; + +typedef unsigned int uint_least32_t; +typedef signed int int_least32_t; + +typedef unsigned long long uint_least64_t; +typedef signed long long int_least64_t; + +/* Fast Types */ +typedef unsigned char uint_fast8_t; +typedef signed char int_fast8_t; + +typedef unsigned int uint_fast16_t; +typedef signed int int_fast16_t; + +typedef unsigned int uint_fast32_t; +typedef signed int int_fast32_t; + +typedef unsigned long long uint_fast64_t; +typedef signed long long int_fast64_t; + +/* Types for `void *' pointers. */ +typedef int intptr_t; +typedef unsigned int uintptr_t; + +/* Largest integral types */ +typedef long long int intmax_t; +typedef unsigned long long uintmax_t; + + +#endif /* I386_STDINT_H */ diff --git a/util/mkelfImage/kunzip_src/arch/i386/lib/Makefile b/util/mkelfImage/kunzip_src/arch/i386/lib/Makefile new file mode 100644 index 0000000000..df2474d210 --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/i386/lib/Makefile @@ -0,0 +1 @@ +ARCH_OPTIONS= diff --git a/util/mkelfImage/kunzip_src/arch/i386/lib/kunzip.lds b/util/mkelfImage/kunzip_src/arch/i386/lib/kunzip.lds new file mode 100644 index 0000000000..0e649005cf --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/i386/lib/kunzip.lds @@ -0,0 +1,50 @@ +PAGE_SIZE = 4096; +BASIC_ALIGN = 8; +OUTPUT_FORMAT("elf32-i386") +ENTRY(__start) +SECTIONS +{ + . = PAGE_SIZE; + _start = .; + /* + * First we place the code and read only data (typically const declared). + * This get placed in rom. + */ + .text : { + _text = .; + *(.text) + _etext = .; + _rodata = .; + *(.rodata); + _erodata = .; + } + /* Global data */ + .data : { + _data = .; + *(.data) + CONSTRUCTORS + *(.got) + *(.sdata) + _edata = .; + } + + /* Important align _bss so bss may be zeroed with quadword access */ + . = ALIGN(BASIC_ALIGN); + .bss : { + _bss = .; + *(.sbss) + *(.scommon) + *(.bss) + *(COMMON) + *(.heap) + *(.stack) + /* Important align _ebss so bss may be zeroed with quadword access */ + . 
= ALIGN(BASIC_ALIGN); + _ebss = .; + } + _end = .; + + /DISCARD/ : { + *(*) + } +} diff --git a/util/mkelfImage/kunzip_src/arch/i386/lib/start.S b/util/mkelfImage/kunzip_src/arch/i386/lib/start.S new file mode 100644 index 0000000000..fec7c9da66 --- /dev/null +++ b/util/mkelfImage/kunzip_src/arch/i386/lib/start.S @@ -0,0 +1,36 @@ +.text + +__original_registers: + .long 0, 0, 0, 0, 0, 0 , 0 , 0 +__entry: + .long entry + +.globl __start +__start: + movl %eax, 0+__original_registers + movl %ebx, 4+__original_registers + movl %ecx, 8+__original_registers + movl %edx, 12+__original_registers + movl %esi, 16+__original_registers + movl %edi, 20+__original_registers + movl %esp, 24+__original_registers + movl %ebp, 28+__original_registers + +__normal_start: + movl $_estack, %esp + call kunzip + +.globl jmp_to_program_entry +jmp_to_program_entry: + movl 4(%esp), %eax + movl %eax, __entry + movl 0+__original_registers, %eax + movl 4+__original_registers, %ebx + movl 8+__original_registers, %ecx + movl 12+__original_registers, %edx + movl 16+__original_registers, %esi + movl 20+__original_registers, %edi + movl 24+__original_registers, %esp + movl 28+__original_registers, %ebp + jmp *__entry + \ No newline at end of file diff --git a/util/mkelfImage/kunzip_src/include/stdarg.h b/util/mkelfImage/kunzip_src/include/stdarg.h new file mode 100644 index 0000000000..24f3383198 --- /dev/null +++ b/util/mkelfImage/kunzip_src/include/stdarg.h @@ -0,0 +1,205 @@ +/* stdarg.h for GNU. + Note that the type used in va_arg is supposed to match the + actual type **after default promotions**. + Thus, va_arg (..., short) is not valid. */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +#ifdef __clipper__ +#include "va-clipper.h" +#else +#ifdef __m88k__ +#include "va-m88k.h" +#else +#ifdef __i860__ +#include "va-i860.h" +#else +#ifdef __hppa__ +#include "va-pa.h" +#else +#ifdef __mips__ +#include "va-mips.h" +#else +#ifdef __sparc__ +#include "va-sparc.h" +#else +#ifdef __i960__ +#include "va-i960.h" +#else +#ifdef __alpha__ +#include "va-alpha.h" +#else +#if defined (__H8300__) || defined (__H8300H__) || defined (__H8300S__) +#include "va-h8300.h" +#else +#if defined (__PPC__) && (defined (_CALL_SYSV) || defined (_WIN32)) +#include "va-ppc.h" +#else +#ifdef __arc__ +#include "va-arc.h" +#else +#ifdef __M32R__ +#include "va-m32r.h" +#else +#ifdef __sh__ +#include "va-sh.h" +#else +#ifdef __mn10300__ +#include "va-mn10300.h" +#else +#ifdef __mn10200__ +#include "va-mn10200.h" +#else +#ifdef __v850__ +#include "va-v850.h" +#else + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX) || defined(__NetBSD__) +typedef char *__gnuc_va_list; +#else +typedef void *__gnuc_va_list; +#endif +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +/* Amount of space required in an argument list for an arg of type TYPE. + TYPE may alternatively be an expression whose type is used. 
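+   For example, with a 4-byte int, __va_rounded_size (char) comes out as
+   sizeof (int): small arguments are promoted and occupy a full int-sized slot.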
*/ + +#if defined(sysV68) +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (short) - 1) / sizeof (short)) * sizeof (short)) +#else +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int)) +#endif + +#define va_start(AP, LASTARG) \ + (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG))) + +#undef va_end +void va_end (__gnuc_va_list); /* Defined in libgcc.a */ +#define va_end(AP) ((void)0) + +/* We cast to void * and then to TYPE * because this avoids + a warning about increasing the alignment requirement. */ + +#if (defined (__arm__) && ! defined (__ARMEB__)) || defined (__i386__) || defined (__i860__) || defined (__ns32000__) || defined (__vax__) +/* This is for little-endian machines; small args are padded upward. */ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))) +#else /* big-endian */ +/* This is for big-endian machines; small args are padded downward. */ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) \ + - ((sizeof (TYPE) < __va_rounded_size (char) \ + ? sizeof (TYPE) : __va_rounded_size (TYPE)))))) +#endif /* big-endian */ + +/* Copy __gnuc_va_list into another variable of this type. */ +#define __va_copy(dest, src) (dest) = (src) + +#endif /* _STDARG_H */ + +#endif /* not v850 */ +#endif /* not mn10200 */ +#endif /* not mn10300 */ +#endif /* not sh */ +#endif /* not m32r */ +#endif /* not arc */ +#endif /* not powerpc with V.4 calling sequence */ +#endif /* not h8300 */ +#endif /* not alpha */ +#endif /* not i960 */ +#endif /* not sparc */ +#endif /* not mips */ +#endif /* not hppa */ +#endif /* not i860 */ +#endif /* not m88k */ +#endif /* not clipper */ + +#ifdef _STDARG_H +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. 
*/ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +typedef __gnuc_va_list va_list; +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/util/mkelfImage/kunzip_src/include/stdlib.h b/util/mkelfImage/kunzip_src/include/stdlib.h new file mode 100644 index 0000000000..eb67d20fe7 --- /dev/null +++ b/util/mkelfImage/kunzip_src/include/stdlib.h @@ -0,0 +1,14 @@ +#ifndef STDLIB_H +#define STDLIB_H + +#include + +extern void *malloc(size_t size); +void free(void *ptr); + +/* Extensions to malloc... */ +typedef size_t malloc_mark_t; +void malloc_mark(malloc_mark_t *place); +void malloc_release(malloc_mark_t *place); + +#endif /* STDLIB_H */ diff --git a/util/mkelfImage/kunzip_src/include/string.h b/util/mkelfImage/kunzip_src/include/string.h new file mode 100644 index 0000000000..dbaa1ecd17 --- /dev/null +++ b/util/mkelfImage/kunzip_src/include/string.h @@ -0,0 +1,31 @@ +#ifndef STRING_H +#define STRING_H + +#include + +// yes, linux has fancy ones. We don't care. This stuff gets used +// hardly at all. And the pain of including those files is just too high. + +//extern inline void strcpy(char *dst, char *src) {while (*src) *dst++ = *src++;} + +//extern inline int strlen(char *src) { int i = 0; while (*src++) i++; return i;} + +static inline size_t strnlen(const char *src, size_t max) { + int i = 0; + if (max<0) { + while (*src++) + i++; + return i; + } + else { + while ((*src++) && (i < max)) + i++; + return i; + } +} + +extern void *memcpy(void *dest, const void *src, size_t n); +extern void *memset(void *s, int c, size_t n); +extern int memcmp(const void *s1, const void *s2, size_t n); + +#endif /* STRING_H */ diff --git a/util/mkelfImage/kunzip_src/include/types.h b/util/mkelfImage/kunzip_src/include/types.h new file mode 100644 index 0000000000..4b9dadfe55 --- /dev/null +++ b/util/mkelfImage/kunzip_src/include/types.h @@ -0,0 +1,18 @@ +#ifndef TYPES_H +#define TYPES_H + +#include +#include + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef int64_t s64; + +/* FIXME is BITS_PER_LONG needed? */ + +#endif diff --git a/util/mkelfImage/kunzip_src/lib/inflate.c b/util/mkelfImage/kunzip_src/lib/inflate.c new file mode 100644 index 0000000000..894a98cd27 --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/inflate.c @@ -0,0 +1,1178 @@ +#define DEBG(x) +#define DEBG1(x) +/* Taken from /usr/src/linux/lib/inflate.c [unmodified] + Used for start32, 1/11/2000 + James Hendricks, Dale Webster */ + +/* inflate.c -- Not copyrighted 1992 by Mark Adler + version c10p1, 10 January 1993 */ + +/* + * Adapted for booting Linux by Hannu Savolainen 1993 + * based on gzip-1.0.3 + * + * Nicolas Pitre , 1999/04/14 : + * Little mods for all variable to reside either into rodata or bss segments + * by marking constant variables with 'const' and initializing all the others + * at run-time only. 
This allows for the kernel uncompressor to run + * directly from Flash or ROM memory on embeded systems. + */ + +/* + Inflate deflated (PKZIP's method 8 compressed) data. The compression + method searches for as much of the current string of bytes (up to a + length of 258) in the previous 32 K bytes. If it doesn't find any + matches (of at least length 3), it codes the next byte. Otherwise, it + codes the length of the matched string and its distance backwards from + the current position. There is a single Huffman code that codes both + single bytes (called "literals") and match lengths. A second Huffman + code codes the distance information, which follows a length code. Each + length or distance code actually represents a base value and a number + of "extra" (sometimes zero) bits to get to add to the base value. At + the end of each deflated block is a special end-of-block (EOB) literal/ + length code. The decoding process is basically: get a literal/length + code; if EOB then done; if a literal, emit the decoded byte; if a + length then get the distance and emit the referred-to bytes from the + sliding window of previously emitted data. + + There are (currently) three kinds of inflate blocks: stored, fixed, and + dynamic. The compressor deals with some chunk of data at a time, and + decides which method to use on a chunk-by-chunk basis. A chunk might + typically be 32 K or 64 K. If the chunk is incompressible, then the + "stored" method is used. In this case, the bytes are simply stored as + is, eight bits per byte, with none of the above coding. The bytes are + preceded by a count, since there is no longer an EOB code. + + If the data is compressible, then either the fixed or dynamic methods + are used. In the dynamic method, the compressed data is preceded by + an encoding of the literal/length and distance Huffman codes that are + to be used to decode this block. The representation is itself Huffman + coded, and so is preceded by a description of that code. These code + descriptions take up a little space, and so for small blocks, there is + a predefined set of codes, called the fixed codes. The fixed method is + used if the block codes up smaller that way (usually for quite small + chunks), otherwise the dynamic method is used. In the latter case, the + codes are customized to the probabilities in the current block, and so + can code it much better than the pre-determined fixed codes. + + The Huffman codes themselves are decoded using a multi-level table + lookup, in order to maximize the speed of decoding plus the speed of + building the decoding tables. See the comments below that precede the + lbits and dbits tuning parameters. + */ + + +/* + Notes beyond the 1.93a appnote.txt: + + 1. Distance pointers never point before the beginning of the output + stream. + 2. Distance pointers can point back across blocks, up to 32k away. + 3. There is an implied maximum of 7 bits for the bit length table and + 15 bits for the actual data. + 4. If only one code exists, then it is encoded using one bit. (Zero + would be more efficient, but perhaps a little confusing.) If two + codes exist, they are coded using one bit each (0 and 1). + 5. There is no way of sending zero distance codes--a dummy must be + sent if there are none. (History: a pre 2.0 version of PKZIP would + store blocks with no distance codes, but this was discovered to be + too harsh a criterion.) Valid only for 1.93a. 2.04c does allow + zero distance codes, which is sent as one code of zero bits in + length. + 6. 
There are up to 286 literal/length codes. Code 256 represents the + end-of-block. Note however that the static length tree defines + 288 codes just to fill out the Huffman codes. Codes 286 and 287 + cannot be used though, since there is no length base or extra bits + defined for them. Similarly, there are up to 30 distance codes. + However, static trees define 32 codes (all 5 bits) to fill out the + Huffman codes, but the last two had better not show up in the data. + 7. Unzip can check dynamic Huffman blocks for complete code sets. + The exception is that a single code would not be complete (see #4). + 8. The five bits following the block type is really the number of + literal codes sent minus 257. + 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits + (1+6+6). Therefore, to output three times the length, you output + three codes (1+1+1), whereas to output four times the same length, + you only need two codes (1+3). Hmm. + 10. In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. + */ + +#ifdef RCSID +static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; +#endif + +#ifndef STATIC + +#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H) +# include +# include +#endif + +#include "gzip.h" +#define STATIC +#endif /* !STATIC */ + +#define slide window + +/* Huffman code lookup table entry--this entry is four bytes for machines + that have 16-bit pointers (e.g. PC's in the small or medium model). + Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16 + means that v is a literal, 16 < e < 32 means that v is a pointer to + the next table, which codes e - 16 bits, and lastly e == 99 indicates + an unused code. If a code with e == 99 is looked up, this implies an + error in the data. */ +struct huft { + uch e; /* number of extra bits or operation */ + uch b; /* number of bits in this code or subcode */ + union { + ush n; /* literal, length base, or distance base */ + struct huft *t; /* pointer to next level of table */ + } v; +}; + + +/* Function prototypes */ +STATIC int huft_build OF((unsigned *, unsigned, unsigned, + const ush *, const ush *, struct huft **, int *)); +STATIC int huft_free OF((struct huft *)); +STATIC int inflate_codes OF((struct huft *, struct huft *, int, int)); +STATIC int inflate_stored OF((void)); +STATIC int inflate_fixed OF((void)); +STATIC int inflate_dynamic OF((void)); +STATIC int inflate_block OF((int *)); +STATIC int inflate OF((void)); + + +/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed + stream to find repeated byte strings. This is implemented here as a + circular buffer. The index is updated simply by incrementing and then + ANDing with 0x7fff (32K-1). */ +/* It is left to other modules to supply the 32 K area. It is assumed + to be usable as if it were declared "uch slide[32768];" or as just + "uch *slide;" and then malloc'ed in the latter case. 
The definition + must be in unzip.h, included above. */ +/* unsigned wp; current position in slide */ +#define wp outcnt +#define flush_output(w) (wp=(w),flush_window()) + +/* Tables for deflate from PKZIP's appnote.txt. */ +static const unsigned border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. */ +static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static const ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + + + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the beginning of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes because fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) However, by making the first table have a + lookup of seven bits, the EOB code will be found in that first + lookup, and so will not require that too many bits be pulled from + the stream. + */ + +STATIC ulg bb; /* bit buffer */ +STATIC unsigned bk; /* bits in bit buffer */ + +STATIC const ush mask_bits[] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +#define NEXTBYTE() (uch)get_byte() +#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<>=(n);k-=(n);} + + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. However, the time it takes + to build this table can also be a factor if the data being decoded + is not very long. The most common codes are necessarily the + shortest codes, so those codes dominate the decoding time, and hence + the speed. The idea is you can have a shorter table that decodes the + shorter, more probable codes, and then point to subsidiary tables for + the longer codes. 
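+   (For example, with a 9-bit first-level table, any code of nine or fewer
+   bits is resolved by a single indexed lookup, while a longer code costs one
+   extra indirection for each subsidiary table it passes through.)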
The time it costs to decode the longer codes is + then traded against the time it takes to make longer tables. + + This results of this trade are in the variables lbits and dbits + below. lbits is the number of bits the first level table for literal/ + length codes can decode in one step, and dbits is the same thing for + the distance codes. Subsequent tables are also less than or equal to + those sizes. These values may be adjusted either when all of the + codes are shorter than that, in which case the longest code length in + bits is used, or when the shortest code is *longer* than the requested + table size, in which case the length of the shortest code in bits is + used. + + There are two different values for the two tables, since they code a + different number of possibilities each. The literal/length table + codes 286 possible values, or in a flat code, a little over eight + bits. The distance table codes 30 possible values, or a little less + than five bits, flat. The optimum values for speed end up being + about one bit more than those, so lbits is 8+1 and dbits is 5+1. + The optimum values may differ though from machine to machine, and + possibly even between compilers. Your mileage may vary. + */ + + +STATIC const int lbits = 9; /* bits in base literal/length lookup table */ +STATIC const int dbits = 6; /* bits in base distance lookup table */ + + +/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ +#define BMAX 16 /* maximum bit length of any code (16 for explode) */ +#define N_MAX 288 /* maximum number of codes in any set */ + + +STATIC unsigned hufts; /* track memory usage */ + + +STATIC int huft_build(b, n, s, d, e, t, m) +unsigned *b; /* code lengths in bits (all assumed <= BMAX) */ +unsigned n; /* number of codes (assumed <= N_MAX) */ +unsigned s; /* number of simple-valued codes (0..s-1) */ +const ush *d; /* list of base values for non-simple codes */ +const ush *e; /* list of extra bits for non-simple codes */ +struct huft **t; /* result: starting table */ +int *m; /* maximum lookup bits, returns actual */ +/* Given a list of code lengths and a maximum table size, make a set of + tables to decode that set of codes. Return zero on success, one if + the given code set is incomplete (the tables are still built in this + case), two if the input is invalid (all zero length codes or an + oversubscribed set of lengths), and three if not enough memory. */ +{ + unsigned a; /* counter for codes of length k */ + unsigned c[BMAX+1]; /* bit length count table */ + unsigned f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int h; /* table level */ + register unsigned i; /* counter, current code */ + register unsigned j; /* counter */ + register int k; /* number of bits in current code */ + int l; /* bits per table (returned in m) */ + register unsigned *p; /* pointer into c[], b[], or v[] */ + register struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + struct huft *u[BMAX]; /* table stack */ + unsigned v[N_MAX]; /* values in order of bit length */ + register int w; /* bits before this table == (l * h) */ + unsigned x[BMAX+1]; /* bit offsets, then code stack */ + unsigned *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + unsigned z; /* number of entries in current table */ + +DEBG("huft1 "); + + /* Generate counts for each bit length */ + memzero(c, sizeof(c)); + p = b; i = n; + do { + Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n-i, *p)); + c[*p]++; /* assume all entries <= BMAX */ + p++; /* Can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return 0; + } + +DEBG("huft2 "); + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((unsigned)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((unsigned)l > i) + l = i; + *m = l; + +DEBG("huft3 "); + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return 2; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return 2; + c[i] += y; + +DEBG("huft4 "); + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p++); + } + +DEBG("huft5 "); + + /* Make a table of values in order of bit lengths */ + p = b; i = 0; + do { + if ((j = *p++) != 0) + v[x[j]++] = i; + } while (++i < n); + +DEBG("h6 "); + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ +DEBG("h6a "); + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { +DEBG("h6b "); + a = c[k]; + while (a--) + { +DEBG("h6b1 "); + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { +DEBG1("1 "); + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ +DEBG1("2 "); + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } +DEBG1("3 "); + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == + (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return 3; /* not enough memory */ + } +DEBG1("4 "); + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + +DEBG1("5 "); + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } +DEBG1("6 "); + } +DEBG("h6c "); + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p < s) + { + r.e = (uch)(*p < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p); /* simple code is just the value */ + p++; /* one compiler does not like *p++ */ + } + else + { + r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ + r.v.n = d[*p++ - s]; + } +DEBG("h6d "); + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } +DEBG("h6e "); + } +DEBG("h6f "); + } + +DEBG("huft7 "); + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + + + +STATIC int huft_free(t) +struct huft *t; /* table to free */ +/* Free the malloc'ed tables built by huft_build(), which makes a linked + list of the tables it made, with the links in a dummy first entry of + each table. */ +{ + register struct huft *p, *q; + + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + free((char*)p); + p = q; + } + return 0; +} + + +STATIC int inflate_codes(tl, td, bl, bd) +struct huft *tl, *td; /* literal/length and distance decoder tables */ +int bl, bd; /* number of bits decoded by tl[] and td[] */ +/* inflate (decompress) the codes in a deflated (compressed) block. + Return an error code or zero if it all goes ok. */ +{ + register unsigned e; /* table entry flag/number of extra bits */ + unsigned n, d; /* length and index for copy */ + unsigned w; /* current window position */ + struct huft *t; /* pointer to table entry */ + unsigned ml, md; /* masks for bl and bd bits */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + + /* make local copies of globals */ + b = bb; /* initialize bit buffer */ + k = bk; + w = wp; /* initialize window position */ + + /* inflate the coded data */ + ml = mask_bits[bl]; /* precompute masks for speed */ + md = mask_bits[bd]; + for (;;) /* do until end of block */ + { + NEEDBITS((unsigned)bl) + if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + DUMPBITS(t->b) + if (e == 16) /* then it's a literal */ + { + slide[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", slide[w-1])); + if (w == WSIZE) + { + flush_output(w); + w = 0; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if (e == 15) + break; + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)bd) + if ((e = (t = td + ((unsigned)b & md))->e) > 16) + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + DUMPBITS(t->b) + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + Tracevv((stderr,"\\[%d,%d]", w-d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? 
n : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if (w - d >= e) /* (this test assumes unsigned comparison) */ + { + memcpy(slide + w, slide + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + do { + slide[w++] = slide[d++]; + Tracevv((stderr, "%c", slide[w-1])); + } while (--e); + if (w == WSIZE) + { + flush_output(w); + w = 0; + } + } while (n); + } + } + + + /* restore the globals from the locals */ + wp = w; /* restore global window pointer */ + bb = b; /* restore global bit buffer */ + bk = k; + + /* done */ + return 0; +} + + + +STATIC int inflate_stored() +/* "decompress" an inflated type 0 (stored) block. */ +{ + unsigned n; /* number of bytes in block */ + unsigned w; /* current window position */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + +DEBG(""); + return 0; +} + + + +STATIC int inflate_fixed() +/* decompress an inflated type 1 (fixed Huffman codes) block. We should + either replace this with a custom decoder, or at least precompute the + Huffman tables. */ +{ + int i; /* temporary variable */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned l[288]; /* length list for huft_build */ + +DEBG(" 1) + { + huft_free(tl); + + DEBG(">"); + return i; + } + + + /* decompress until an end-of-block code */ + if (inflate_codes(tl, td, bl, bd)) + return 1; + + + /* free the decoding tables, return */ + huft_free(tl); + huft_free(td); + return 0; +} + + + +STATIC int inflate_dynamic() +/* decompress an inflated type 2 (dynamic Huffman codes) block. */ +{ + int i; /* temporary variables */ + unsigned j; + unsigned l; /* last length */ + unsigned m; /* mask for bit lengths table */ + unsigned n; /* number of lengths to get */ + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + int bl; /* lookup bits for tl */ + int bd; /* lookup bits for td */ + unsigned nb; /* number of bit length codes */ + unsigned nl; /* number of literal/length codes */ + unsigned nd; /* number of distance codes */ +#ifdef PKZIP_BUG_WORKAROUND + unsigned ll[288+32]; /* literal/length and distance code lengths */ +#else + unsigned ll[286+30]; /* literal/length and distance code lengths */ +#endif + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + +DEBG(" 288 || nd > 32) +#else + if (nl > 286 || nd > 30) +#endif + return 1; /* bad lengths */ + +DEBG("dyn1 "); + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) + { + NEEDBITS(3) + ll[border[j]] = (unsigned)b & 7; + DUMPBITS(3) + } + for (; j < 19; j++) + ll[border[j]] = 0; + +DEBG("dyn2 "); + + /* build decoding table for trees--single level, 7 bit lookup */ + bl = 7; + if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) + { + if (i == 1) + huft_free(tl); + return i; /* incomplete code set */ + } + +DEBG("dyn3 "); + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[bl]; + i = l = 0; + while ((unsigned)i < n) + { + NEEDBITS((unsigned)bl) + j = (td = tl + ((unsigned)b & m))->b; + DUMPBITS(j) + j = td->v.n; + if (j < 16) /* length of code in bits (0..15) */ + ll[i++] = l = j; /* save last length in l */ + else if (j == 16) /* repeat last length 3 to 6 times */ + { + NEEDBITS(2) + j = 3 + ((unsigned)b & 3); + DUMPBITS(2) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = l; + } + else 
if (j == 17) /* 3 to 10 zero length codes */ + { + NEEDBITS(3) + j = 3 + ((unsigned)b & 7); + DUMPBITS(3) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = 0; + l = 0; + } + else /* j == 18: 11 to 138 zero length codes */ + { + NEEDBITS(7) + j = 11 + ((unsigned)b & 0x7f); + DUMPBITS(7) + if ((unsigned)i + j > n) + return 1; + while (j--) + ll[i++] = 0; + l = 0; + } + } + +DEBG("dyn4 "); + + /* free decoding table for trees */ + huft_free(tl); + +DEBG("dyn5 "); + + /* restore the global bit buffer */ + bb = b; + bk = k; + +DEBG("dyn5a "); + + /* build the decoding tables for literal/length and distance codes */ + bl = lbits; + if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0) + { +DEBG("dyn5b "); + if (i == 1) { + error(" incomplete literal tree\n"); + huft_free(tl); + } + return i; /* incomplete code set */ + } +DEBG("dyn5c "); + bd = dbits; + if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0) + { +DEBG("dyn5d "); + if (i == 1) { + error(" incomplete distance tree\n"); +#ifdef PKZIP_BUG_WORKAROUND + i = 0; + } +#else + huft_free(td); + } + huft_free(tl); + return i; /* incomplete code set */ +#endif + } + +DEBG("dyn6 "); + + /* decompress until an end-of-block code */ + if (inflate_codes(tl, td, bl, bd)) + return 1; + +DEBG("dyn7 "); + + /* free the decoding tables, return */ + huft_free(tl); + huft_free(td); + + DEBG(">"); + return 0; +} + + + +STATIC int inflate_block(e) +int *e; /* last block flag */ +/* decompress an inflated block */ +{ + unsigned t; /* block type */ + register ulg b; /* bit buffer */ + register unsigned k; /* number of bits in bit buffer */ + + DEBG(""); + + /* bad block type */ + return 2; +} + + + +STATIC int inflate() +/* decompress an inflated entry */ +{ + int e; /* last block flag */ + int r; /* result code */ + unsigned h; /* maximum struct huft's malloc'ed */ + malloc_mark_t mark; + + /* initialize window, bit buffer */ + wp = 0; + bk = 0; + bb = 0; + + + /* decompress until the last block */ + h = 0; + do { + hufts = 0; + malloc_mark(&mark); + if ((r = inflate_block(&e)) != 0) { + malloc_release(&mark); + return r; + } + malloc_release(&mark); + if (hufts > h) + h = hufts; + } while (!e); + + /* Undo too much lookahead. The next read will be byte aligned so we + * can discard unused bits in the last meaningful byte. + */ + while (bk >= 8) { + bk -= 8; + inptr--; + } + + /* flush out slide */ + flush_output(wp); + + + /* return success */ + DBG(("<%u> ", h)); + return 0; +} + +/********************************************************************** + * + * The following are support routines for inflate.c + * + **********************************************************************/ + +static ulg crc_32_tab[256]; +static ulg crc; /* initialized in makecrc() so it'll reside in bss */ +#define CRC_VALUE (crc ^ 0xffffffffL) + +/* + * Code to compute the CRC-32 table. Borrowed from + * gzip-1.0.3/makecrc.c. 
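+ * The exclusive-or pattern built from p[] below is the reflected CRC-32
+ * polynomial 0xedb88320, and the table is typically applied one byte at a
+ * time, as in gzip: crc = crc_32_tab[(crc ^ b) & 0xff] ^ (crc >> 8).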
+ */ + +static void +makecrc(void) +{ +/* Not copyrighted 1990 Mark Adler */ + + unsigned long c; /* crc shift register */ + unsigned long e; /* polynomial exclusive-or pattern */ + int i; /* counter for all possible eight bit values */ + int k; /* byte being shifted into crc apparatus */ + + /* terms of polynomial defining this crc (except x^32): */ + static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; + + /* Make exclusive-or pattern from polynomial */ + e = 0; + for (i = 0; i < sizeof(p)/sizeof(int); i++) + e |= 1L << (31 - p[i]); + + crc_32_tab[0] = 0; + + for (i = 1; i < 256; i++) + { + c = 0; + for (k = i | 256; k != 1; k >>= 1) + { + c = c & 1 ? (c >> 1) ^ e : c >> 1; + if (k & 1) + c ^= e; + } + crc_32_tab[i] = c; + } + + /* this is initialized here so this code could reside in ROM */ + crc = (ulg)0xffffffffL; /* shift register contents */ +} + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +/* + * Do the uncompression! + */ +int gunzip(void) +{ + uch flags; + unsigned char magic[2]; /* magic header */ + char method; + ulg orig_crc = 0; /* original crc */ + ulg orig_len = 0; /* original uncompressed length */ + int res; + + magic[0] = (unsigned char)get_byte(); + magic[1] = (unsigned char)get_byte(); + method = (unsigned char)get_byte(); + + if (magic[0] != 037 || + ((magic[1] != 0213) && (magic[1] != 0236))) { + error("bad gzip magic numbers"); + return -1; + } + + /* We only support method #8, DEFLATED */ + if (method != 8) { + error("internal error, invalid method"); + return -1; + } + + flags = (uch)get_byte(); + if ((flags & ENCRYPTED) != 0) { + error("Input is encrypted\n"); + return -1; + } + if ((flags & CONTINUATION) != 0) { + error("Multi part input\n"); + return -1; + } + if ((flags & RESERVED) != 0) { + error("Input has invalid flags\n"); + return -1; + } + (ulg)get_byte(); /* Get timestamp */ + ((ulg)get_byte()) << 8; + ((ulg)get_byte()) << 16; + ((ulg)get_byte()) << 24; + + (void)get_byte(); /* Ignore extra flags for the moment */ + (void)get_byte(); /* Ignore OS type for the moment */ + + if ((flags & EXTRA_FIELD) != 0) { + unsigned len = (unsigned)get_byte(); + len |= ((unsigned)get_byte())<<8; + while (len--) (void)get_byte(); + } + + /* Get original file name if it was truncated */ + if ((flags & ORIG_NAME) != 0) { + /* Discard the old name */ + while (get_byte() != 0) /* null */ ; + } + + /* Discard file comment if any */ + if ((flags & COMMENT) != 0) { + while (get_byte() != 0) /* null */ ; + } + + /* Decompress */ + if ((res = inflate())) { + switch (res) { + case 0: + break; + case 1: + error("invalid compressed format (err=1)"); + break; + case 2: + error("invalid compressed format (err=2)"); + break; + case 3: + error("out of memory"); + break; + default: + error("invalid compressed format (other)"); + } + return -1; + } + + /* Get the crc and original length */ + /* crc32 (see algorithm.doc) + * uncompressed input size modulo 2^32 + */ + orig_crc = (ulg) get_byte(); + orig_crc |= (ulg) get_byte() << 8; + orig_crc |= (ulg) get_byte() << 16; + orig_crc |= (ulg) get_byte() << 24; + + orig_len = (ulg) get_byte(); + orig_len |= (ulg) 
get_byte() << 8; + orig_len |= (ulg) get_byte() << 16; + orig_len |= (ulg) get_byte() << 24; + + /* Validate decompression */ + if (orig_crc != CRC_VALUE) { + error("crc error"); + return -1; + } + if (orig_len != bytes_out) { + error("length error"); + return -1; + } + return 0; +} + + diff --git a/util/mkelfImage/kunzip_src/lib/kunzip.c b/util/mkelfImage/kunzip_src/lib/kunzip.c new file mode 100644 index 0000000000..4bfe504f70 --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/kunzip.c @@ -0,0 +1,170 @@ +#include +#include + +/* + * gzip support routine declartions.. + * ========================================================= + */ + +#ifdef DEBUG +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +# define DBG(x) printf x +#else +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +# define DBG(x) +#endif + +void error(char *str) +{ + DBG(("%s\n", str)); +} + +static unsigned char *inbuf; /* input buffer */ +static unsigned int insize; /* valid bytes in inbuf */ +static unsigned int inptr; /* index of next byte to be processed in inbuf */ + +#if !defined(DEBUG) +#define get_byte() (inptr < insize ? inbuf[inptr++] : 0) +#else +static unsigned char get_byte(void) +{ + static int count; + unsigned char byte = (inptr < insize ? inbuf[inptr++] : 0); +#if 0 + printf("%02x ", byte); + if ((++count & 0x0f) == 0) { + printf("\n"); + } +#endif + return byte; +} + +#endif + +static void flush_window(void); + +static long bytes_out; /* total bytes compressed */ +static unsigned outcnt; /* bytes in output buffer */ + +#define WSIZE 0x8000 /* Window size must be at least 32k, and a power of two */ +static unsigned char window[WSIZE]; /* Sliding window buffer */ + +/* + * gzip declarations + */ + +#define OF(args) args +#define STATIC static + + +#define memzero(s, n) memset ((s), 0, (n)) + +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; + + + +#include "inflate.c" + + +/* Variables that gunzip doesn't need to see... */ +static unsigned char *output_ptr; +static unsigned long end_offset; +static struct unzip_region { + unsigned long start; + unsigned long end_offset; +} unzip_region; + +/* Data provided by the header */ +extern unsigned char zipped_data[]; +extern unsigned char zipped_data_end[]; +extern unsigned char entry; +/* Assembly language routines */ +extern void jmp_to_program_entry(void *); + +/* =========================================================================== + * Write the output window window[0..outcnt-1] and update crc and bytes_out. + * (Used for the decompressed data only.) 
+ */ +static void flush_window(void) +{ + ulg c = crc; /* temporary variable */ + unsigned n; + unsigned long limit; + uch *in, *out, ch; + + limit = outcnt; + + + n = 0; + in = window; + while (n < outcnt) { + limit = end_offset - bytes_out +n; + if (limit > outcnt) { + limit = outcnt; + } + out = output_ptr; + DBG(("flush 0x%08lx start 0x%08lx limit 0x%08lx\n", + (unsigned long) out, (unsigned long)n, limit)); + for (; n < limit; n++) { + ch = *out++ = *in++; + c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8); + } + crc = c; + bytes_out += (out - output_ptr); + output_ptr = out; + if (bytes_out == end_offset) { + if (output_ptr == (unsigned char *)(&unzip_region+1)) { + output_ptr = (unsigned char *)(unzip_region.start); + end_offset = unzip_region.end_offset; + } else { + output_ptr = (unsigned char *)&unzip_region; + end_offset += sizeof(unzip_region); + } + } + } + outcnt = 0; +} + + +void gunzip_setup(void) +{ + DBG(("gunzip_setup\n")); + outcnt = 0; + bytes_out = 0; + + end_offset = sizeof(unzip_region); + output_ptr = (unsigned char *)&unzip_region; + + inbuf = &zipped_data[0]; + insize = zipped_data_end - zipped_data; + inptr = 0; + + makecrc(); + DBG(("gunzip_setup_done\n")); +} + + +int kunzip(int argc, char **argv) +{ + DBG(("kunzip\n")); + gunzip_setup(); + DBG(("pre_gunzip\n")); + if (gunzip() != 0) { + error("gunzip failed"); + while(1) {} + return -1; + } + DBG(("pre_jmp_to_program_entry: %p\n", &entry )); + jmp_to_program_entry(&entry); + return 0; +} diff --git a/util/mkelfImage/kunzip_src/lib/malloc.c b/util/mkelfImage/kunzip_src/lib/malloc.c new file mode 100644 index 0000000000..8e43e0c226 --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/malloc.c @@ -0,0 +1,42 @@ +#include + +extern unsigned char _heap, _eheap; +static size_t free_mem_ptr = (size_t)&_heap; /* Start of heap */ +static size_t free_mem_end_ptr = (size_t)&_eheap; /* End of heap */ + + +void malloc_mark(malloc_mark_t *place) +{ + *place = free_mem_ptr; +} + +void malloc_release(malloc_mark_t *ptr) +{ + free_mem_ptr = *ptr; +} + +void *malloc(size_t size) +{ + void *p; + + if (size < 0) + error("Error! malloc: Size < 0"); + if (free_mem_ptr <= 0) + error("Error! malloc: Free_mem_ptr <= 0"); + + free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ + + p = (void *) free_mem_ptr; + free_mem_ptr += size; + + if (free_mem_ptr >= free_mem_end_ptr) + error("Error! 
malloc: Free_mem_ptr >= free_mem_end_ptr"); + + + return p; +} + +void free(void *where) +{ + /* Don't care */ +} diff --git a/util/mkelfImage/kunzip_src/lib/memcmp.c b/util/mkelfImage/kunzip_src/lib/memcmp.c new file mode 100644 index 0000000000..46f13a41bd --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/memcmp.c @@ -0,0 +1,17 @@ +#include + +int memcmp(const void *src1, const void *src2, size_t bytes) +{ + const unsigned char *s1, *s2; + int result; + s1 = src1; + s2 = src2; + result = 0; + while((bytes > 0) && (result == 0)) { + result = *s1 - *s2; + bytes--; + s1++; + s2++; + } + return result; +} diff --git a/util/mkelfImage/kunzip_src/lib/memcpy.c b/util/mkelfImage/kunzip_src/lib/memcpy.c new file mode 100644 index 0000000000..ad8e8bd3f0 --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/memcpy.c @@ -0,0 +1,11 @@ +#include +void *memcpy(void *__dest, __const void *__src, size_t __n) +{ + int i; + char *d = (char *) __dest, *s = (char *) __src; + + for (i = 0; i < __n; i++) + d[i] = s[i]; + + return __dest; +} diff --git a/util/mkelfImage/kunzip_src/lib/memset.c b/util/mkelfImage/kunzip_src/lib/memset.c new file mode 100644 index 0000000000..c1bb4f841f --- /dev/null +++ b/util/mkelfImage/kunzip_src/lib/memset.c @@ -0,0 +1,12 @@ +#include + +void *memset(void *s, int c, size_t n) +{ + int i; + char *ss = (char *) s; + + for (i = 0; i < n; i++) + ss[i] = c; + + return s; +} -- cgit v1.2.3
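
The checksum machinery imported above (makecrc() plus the per-byte update in flush_window() and the CRC_VALUE comparison in gunzip()) is the standard reflected CRC-32 used by gzip. The standalone sketch below, which is not part of the patch, rebuilds the same 256-entry table from the polynomial taps and applies the same update, crc = table[(crc ^ byte) & 0xff] ^ (crc >> 8), with the 0xffffffff preload and final xor that CRC_VALUE expresses; the names build_table and crc32_of are illustrative only.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t table[256];

static void build_table(void)
{
	/* taps of the CRC-32 polynomial, excluding x^32 (same list as makecrc) */
	static const int p[] = {0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26};
	uint32_t e = 0;
	int i, k;

	for (i = 0; i < (int)(sizeof(p) / sizeof(p[0])); i++)
		e |= 1UL << (31 - p[i]);		/* e == 0xedb88320 */

	table[0] = 0;
	for (i = 1; i < 256; i++) {
		uint32_t c = 0;
		for (k = i | 256; k != 1; k >>= 1) {
			c = (c & 1) ? (c >> 1) ^ e : c >> 1;
			if (k & 1)
				c ^= e;
		}
		table[i] = c;
	}
}

static uint32_t crc32_of(const unsigned char *buf, size_t len)
{
	uint32_t c = 0xffffffffUL;	/* preload, as done at the end of makecrc() */
	size_t i;

	for (i = 0; i < len; i++)	/* same update flush_window() performs */
		c = table[(c ^ buf[i]) & 0xff] ^ (c >> 8);
	return c ^ 0xffffffffUL;	/* final xor, i.e. CRC_VALUE */
}

int main(void)
{
	build_table();
	/* CRC-32 of "123456789" is the well-known check value 0xcbf43926 */
	printf("%08lx\n",
	       (unsigned long)crc32_of((const unsigned char *)"123456789", 9));
	return 0;
}

For the ASCII string "123456789" this prints cbf43926, the standard CRC-32 check value; the same table-driven update is what gunzip() compares against the four CRC bytes in the gzip trailer.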
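
Before handing the deflate stream to inflate(), gunzip() walks the gzip member header: a two-byte magic, the DEFLATE method byte, a flag byte, six bytes it discards (timestamp, extra flags, OS), then the optional EXTRA, NAME and COMMENT fields. The sketch below mirrors that walk under simplifying assumptions: read_byte() and skip_gzip_header() are hypothetical stand-ins for get_byte() and the inline parsing in gunzip(), it checks only the 037/0213 magic (the patch also accepts 0236), and the sample bytes are a hand-built minimal gzip member for empty input.

#include <stdio.h>

#define CONTINUATION	0x02
#define EXTRA_FIELD	0x04
#define ORIG_NAME	0x08
#define COMMENT		0x10
#define ENCRYPTED	0x20
#define RESERVED	0xC0

static const unsigned char *in;
static unsigned long in_len, in_pos;

static int read_byte(void)
{
	return in_pos < in_len ? in[in_pos++] : 0;
}

/* Returns the offset of the first deflate byte, or -1 for a header we reject. */
static long skip_gzip_header(const unsigned char *buf, unsigned long len)
{
	int flags, i;

	in = buf; in_len = len; in_pos = 0;

	if (read_byte() != 037 || read_byte() != 0213)
		return -1;			/* bad magic */
	if (read_byte() != 8)
		return -1;			/* only method 8, DEFLATED */
	flags = read_byte();
	if (flags & (ENCRYPTED | CONTINUATION | RESERVED))
		return -1;			/* same rejections as gunzip() */
	for (i = 0; i < 6; i++)
		(void)read_byte();		/* timestamp (4), extra flags, OS */
	if (flags & EXTRA_FIELD) {
		unsigned n = read_byte();
		n |= (unsigned)read_byte() << 8;	/* little-endian length */
		while (n--)
			(void)read_byte();
	}
	if (flags & ORIG_NAME)
		while (read_byte() != 0) ;	/* NUL-terminated original name */
	if (flags & COMMENT)
		while (read_byte() != 0) ;	/* NUL-terminated comment */
	return (long)in_pos;
}

int main(void)
{
	/* minimal gzip member for empty input: 10-byte header, empty fixed-Huffman
	 * block (03 00), zero CRC-32 and zero length */
	static const unsigned char empty_gz[] = {
		0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x02, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	printf("deflate data starts at offset %ld\n",
	       skip_gzip_header(empty_gz, sizeof(empty_gz)));
	return 0;
}

On the 20-byte member above this reports offset 10, the point at which inflate() would start consuming the compressed bit stream.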