From 52db0b984523047da19ca3b41558b9dbf45abad7 Mon Sep 17 00:00:00 2001
From: Stefan Reinauer
Date: Fri, 7 Dec 2012 17:15:04 -0800
Subject: WIP: Initial ARMv7 architecture implementation in coreboot

The first ARMv7 CPU we're going to support is the Exynos 5250 used in the
Google Snow ChromeBook.

Change-Id: I4de8433bbc6202eb8fef2556a11186a3376d411b
Signed-off-by: David Hendricks
Signed-off-by: Stefan Reinauer
Signed-off-by: Ronald G. Minnich
Reviewed-on: http://review.coreboot.org/2004
Tested-by: build bot (Jenkins)
---
 src/arch/armv7/Kconfig                        |  52 ++
 src/arch/armv7/Makefile.inc                   | 324 +++++++++++
 src/arch/armv7/boot/Makefile.inc              |  13 +
 src/arch/armv7/boot/acpi.c                    | 791 ++++++++++++++++++++++++
 src/arch/armv7/boot/acpigen.c                 | 729 ++++++++++++++++++++++++
 src/arch/armv7/boot/boot.c                    | 189 ++++++
 src/arch/armv7/boot/coreboot_table.c          | 711 +++++++++++++++++++++++
 src/arch/armv7/boot/multiboot.c               |  77 +++
 src/arch/armv7/boot/tables.c                  | 100 ++++
 src/arch/armv7/boot/wakeup.S                  |  94 +++
 src/arch/armv7/bootblock_simple.c             |  51 ++
 src/arch/armv7/coreboot_ram.ld                | 128 +++++
 src/arch/armv7/cpu.c                          | 101 ++++
 src/arch/armv7/include/arch/atomic.h          | 111 ++++
 src/arch/armv7/include/arch/boot/boot.h       |   8 +
 src/arch/armv7/include/arch/byteorder.h       |  27 +
 src/arch/armv7/include/arch/coreboot_tables.h |  25 +
 src/arch/armv7/include/arch/cpu.h             |  21 +
 src/arch/armv7/include/arch/gpio.h            |  98 ++++
 src/arch/armv7/include/arch/hlt.h             |  10 +
 src/arch/armv7/include/arch/io.h              | 427 ++++++++++++++
 src/arch/armv7/include/arch/pci_ops.h         |  19 +
 src/arch/armv7/include/arch/types.h           |  53 ++
 src/arch/armv7/include/armv7.h                |  76 +++
 src/arch/armv7/include/assembler.h            |  60 ++
 src/arch/armv7/include/bootblock_common.h     |  69 +++
 src/arch/armv7/include/cache.h                |  56 ++
 src/arch/armv7/include/clocks.h               |  44 ++
 src/arch/armv7/include/common.h               | 494 ++++++++++++++++
 src/arch/armv7/include/div64.h                | 233 ++++++++
 src/arch/armv7/include/global_data.h          | 108 ++++
 src/arch/armv7/include/hang.h                 |  20 +
 src/arch/armv7/include/mmio_conf.h            |   6 +
 src/arch/armv7/include/ptrace.h               |  30 +
 src/arch/armv7/include/smp/spinlock.h         |  52 ++
 src/arch/armv7/include/stdint.h               |  81 +++
 src/arch/armv7/include/system.h               | 123 ++++
 src/arch/armv7/include/utils.h                |  56 ++
 src/arch/armv7/ldscript_failover.lb           |  74 +++
 src/arch/armv7/ldscript_fallback_cbfs.lb      |  51 ++
 src/arch/armv7/lib/Makefile.inc               |  26 +
 src/arch/armv7/lib/Makefile.uboot             |  95 ++++
 src/arch/armv7/lib/_divsi3.S                  | 142 +++++
 src/arch/armv7/lib/_udivsi3.S                 |  93 +++
 src/arch/armv7/lib/_uldivmod.S                | 157 +++++
 src/arch/armv7/lib/bootm.c                    | 405 +++++++++++++
 src/arch/armv7/lib/c_start.S                  |   9 +
 src/arch/armv7/lib/cache-cp15.c               | 304 ++++++++++
 src/arch/armv7/lib/cache_v7.c                 | 427 ++++++++++++++
 src/arch/armv7/lib/div.c                      |   5 +
 src/arch/armv7/lib/div0.c                     |  32 ++
 src/arch/armv7/lib/div64.S                    | 208 +++++++
 src/arch/armv7/lib/eabi_compat.c              |  33 ++
 src/arch/armv7/lib/hang_spl.c                 |  16 +
 src/arch/armv7/lib/id.inc                     |  18 +
 src/arch/armv7/lib/id.lds                     |   6 +
 src/arch/armv7/lib/interrupts.c               | 198 +++++++
 src/arch/armv7/lib/memcpy.S                   | 243 ++++++++
 src/arch/armv7/lib/memset.S                   | 126 ++++
 src/arch/armv7/lib/reset.c                    |  53 ++
 src/arch/armv7/lib/romstage_console.c         |  75 +++
 src/arch/armv7/lib/syslib.c                   |  70 +++
 src/arch/armv7/romstage.ld                    | 119 ++++
 src/arch/armv7/start.S                        | 543 ++++++++++++++++++
 64 files changed, 9195 insertions(+)
 create mode 100644 src/arch/armv7/Makefile.inc
 create mode 100644 src/arch/armv7/boot/Makefile.inc
 create mode 100644 src/arch/armv7/boot/acpi.c
 create mode 100644 src/arch/armv7/boot/acpigen.c
 create mode 100644
src/arch/armv7/boot/boot.c create mode 100644 src/arch/armv7/boot/coreboot_table.c create mode 100644 src/arch/armv7/boot/multiboot.c create mode 100644 src/arch/armv7/boot/tables.c create mode 100644 src/arch/armv7/boot/wakeup.S create mode 100644 src/arch/armv7/bootblock_simple.c create mode 100644 src/arch/armv7/coreboot_ram.ld create mode 100644 src/arch/armv7/cpu.c create mode 100644 src/arch/armv7/include/arch/atomic.h create mode 100644 src/arch/armv7/include/arch/boot/boot.h create mode 100644 src/arch/armv7/include/arch/byteorder.h create mode 100644 src/arch/armv7/include/arch/coreboot_tables.h create mode 100644 src/arch/armv7/include/arch/gpio.h create mode 100644 src/arch/armv7/include/arch/hlt.h create mode 100644 src/arch/armv7/include/arch/io.h create mode 100644 src/arch/armv7/include/arch/pci_ops.h create mode 100644 src/arch/armv7/include/arch/types.h create mode 100644 src/arch/armv7/include/armv7.h create mode 100644 src/arch/armv7/include/assembler.h create mode 100644 src/arch/armv7/include/bootblock_common.h create mode 100644 src/arch/armv7/include/cache.h create mode 100644 src/arch/armv7/include/clocks.h create mode 100644 src/arch/armv7/include/common.h create mode 100644 src/arch/armv7/include/div64.h create mode 100644 src/arch/armv7/include/global_data.h create mode 100644 src/arch/armv7/include/hang.h create mode 100644 src/arch/armv7/include/mmio_conf.h create mode 100644 src/arch/armv7/include/ptrace.h create mode 100644 src/arch/armv7/include/smp/spinlock.h create mode 100644 src/arch/armv7/include/stdint.h create mode 100644 src/arch/armv7/include/system.h create mode 100644 src/arch/armv7/include/utils.h create mode 100644 src/arch/armv7/ldscript_failover.lb create mode 100644 src/arch/armv7/ldscript_fallback_cbfs.lb create mode 100644 src/arch/armv7/lib/Makefile.inc create mode 100644 src/arch/armv7/lib/Makefile.uboot create mode 100644 src/arch/armv7/lib/_divsi3.S create mode 100644 src/arch/armv7/lib/_udivsi3.S create mode 100644 src/arch/armv7/lib/_uldivmod.S create mode 100644 src/arch/armv7/lib/bootm.c create mode 100644 src/arch/armv7/lib/c_start.S create mode 100644 src/arch/armv7/lib/cache-cp15.c create mode 100644 src/arch/armv7/lib/cache_v7.c create mode 100644 src/arch/armv7/lib/div.c create mode 100644 src/arch/armv7/lib/div0.c create mode 100644 src/arch/armv7/lib/div64.S create mode 100644 src/arch/armv7/lib/eabi_compat.c create mode 100644 src/arch/armv7/lib/hang_spl.c create mode 100644 src/arch/armv7/lib/id.inc create mode 100644 src/arch/armv7/lib/id.lds create mode 100644 src/arch/armv7/lib/interrupts.c create mode 100644 src/arch/armv7/lib/memcpy.S create mode 100644 src/arch/armv7/lib/memset.S create mode 100644 src/arch/armv7/lib/reset.c create mode 100644 src/arch/armv7/lib/romstage_console.c create mode 100644 src/arch/armv7/lib/syslib.c create mode 100644 src/arch/armv7/romstage.ld create mode 100644 src/arch/armv7/start.S (limited to 'src/arch/armv7') diff --git a/src/arch/armv7/Kconfig b/src/arch/armv7/Kconfig index 00e6549d98..f92911d6b7 100644 --- a/src/arch/armv7/Kconfig +++ b/src/arch/armv7/Kconfig @@ -1,3 +1,55 @@ menu "Architecture (armv7)" +config SPL_BUILD + bool "Build second-phase bootloader (SPL)" + default y + +config EABI_COMPAT + bool "Toolchain is EABI compatible" + default n + +# Maximum reboot count +# TODO: Improve description. 
+config MAX_REBOOT_CNT + int + default 3 + +choice + prompt "Bootblock behaviour" + default ARM_BOOTBLOCK_SIMPLE + +config ARM_BOOTBLOCK_SIMPLE + bool "Always load fallback" + +config ARM_BOOTBLOCK_NORMAL + bool "Switch to normal if non-volatile memory says so" + +endchoice + +config BOOTBLOCK_SOURCE + string + default "bootblock_simple.c" if ARM_BOOTBLOCK_SIMPLE + default "bootblock_normal.c" if ARM_BOOTBLOCK_NORMAL + +config UPDATE_IMAGE + bool "Update existing coreboot.rom image" + default n + depends on TINY_BOOTBLOCK + help + If this option is enabled, no new coreboot.rom file + is created. Instead it is expected that there already + is a suitable file for further processing. + The bootblock will not be modified. + +config BOOTBLOCK_SOC_INIT + string + +# FIXME: Should cache policy be set on a per-CPU basis? +# FIXME(dhendrix): Stefan sayz to make a smart decision and not prompt the user. +config ARM_DCACHE_POLICY_WRITEBACK + bool y + +config ARM_DCACHE_POLICY_WRITETHROUGH + bool n + endmenu diff --git a/src/arch/armv7/Makefile.inc b/src/arch/armv7/Makefile.inc new file mode 100644 index 0000000000..076a93ff5f --- /dev/null +++ b/src/arch/armv7/Makefile.inc @@ -0,0 +1,324 @@ +################################################################################ +## +## This file is part of the coreboot project. +## +## Copyright (C) 2012 The ChromiumOS Authors +## Copyright (C) 2012 Alexandru Gagniuc +## Copyright (C) 2009-2010 coresystems GmbH +## Copyright (C) 2009 Ronald G. Minnich +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; version 2 of the License. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. 
+## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +## +################################################################################ + +# Take care of subdirectories +subdirs-y += boot/ +subdirs-y += lib/ +# subdirs-y += smp/ + +################################################################################ +# Build the final rom image +COREBOOT_ROM_DEPENDENCIES:= +ifeq ($(CONFIG_PAYLOAD_ELF),y) +COREBOOT_ROM_DEPENDENCIES+=$(CONFIG_PAYLOAD_FILE) +endif +#ifeq ($(CONFIG_AP_CODE_IN_CAR),y) +#COREBOOT_ROM_DEPENDENCIES+=$(objcbfs)/coreboot_ap.elf +#endif + +extract_nth=$(word $(1), $(subst |, ,$(2))) + +ifneq ($(CONFIG_UPDATE_IMAGE),y) +prebuild-files = \ + $(foreach file,$(cbfs-files), \ + $(CBFSTOOL) $@.tmp \ + add$(if $(filter stage,$(call extract_nth,3,$(file))),-stage)$(if $(filter payload,$(call extract_nth,3,$(file))),-payload) \ + $(call extract_nth,1,$(file)) \ + $(call extract_nth,2,$(file)) $(if $(filter-out stage payload,$(call extract_nth,3,$(file))),$(call extract_nth,3,$(file))) \ + $(call extract_nth,4,$(file)) &&) +prebuilt-files = $(foreach file,$(cbfs-files), $(call extract_nth,1,$(file))) + +$(obj)/coreboot.pre1: $(objcbfs)/bootblock.bin $$(prebuilt-files) $(CBFSTOOL) + $(CBFSTOOL) $@.tmp create -m armv7 -s $(CONFIG_COREBOOT_ROMSIZE_KB)K \ + -B $(objcbfs)/bootblock.bin -a 64 \ + -o $$(( $(CONFIG_ROM_SIZE) - $(CONFIG_CBFS_SIZE) )) + $(prebuild-files) true + mv $@.tmp $@ +else +.PHONY: $(obj)/coreboot.pre1 +$(obj)/coreboot.pre1: $(CBFSTOOL) + mv $(obj)/coreboot.rom $@ +endif + +$(obj)/coreboot.rom: $(obj)/coreboot.pre $(objcbfs)/coreboot_ram.elf $(CBFSTOOL) $(call strip_quotes,$(COREBOOT_ROM_DEPENDENCIES)) + @printf " CBFS $(subst $(obj)/,,$(@))\n" + cp $(obj)/coreboot.pre $@.tmp + if [ -f $(objcbfs)/coreboot_ap.elf ]; \ + then \ + $(CBFSTOOL) $@.tmp add-stage -f $(objcbfs)/coreboot_ap.elf -n $(CONFIG_CBFS_PREFIX)/coreboot_ap -c $(CBFS_COMPRESS_FLAG) ; \ + fi + $(CBFSTOOL) $@.tmp add-stage -f $(objcbfs)/coreboot_ram.elf -n $(CONFIG_CBFS_PREFIX)/coreboot_ram -c $(CBFS_COMPRESS_FLAG) +ifeq ($(CONFIG_PAYLOAD_NONE),y) + @printf " PAYLOAD \e[1;31mnone (as specified by user)\e[0m\n" +endif +ifeq ($(CONFIG_PAYLOAD_ELF),y) + @printf " PAYLOAD $(CONFIG_PAYLOAD_FILE) (compression: $(CBFS_PAYLOAD_COMPRESS_NAME))\n" + $(CBFSTOOL) $@.tmp add-payload $(CONFIG_PAYLOAD_FILE) $(CONFIG_CBFS_PREFIX)/payload $(CBFS_PAYLOAD_COMPRESS_FLAG) +endif +ifeq ($(CONFIG_INCLUDE_CONFIG_FILE),y) + @printf " CONFIG $(DOTCONFIG)\n" + if [ -f $(DOTCONFIG) ]; then \ + echo "# This image was built using git revision" `git rev-parse HEAD` > $(obj)/config.tmp ; \ + sed -e '/^#/d' -e '/^ *$$/d' $(DOTCONFIG) >> $(obj)/config.tmp ; \ + $(CBFSTOOL) $@.tmp add -f $(obj)/config.tmp -n config -t raw; rm -f $(obj)/config.tmp ; fi +endif + mv $@.tmp $@ + @printf " CBFSPRINT $(subst $(obj)/,,$(@))\n\n" + $(CBFSTOOL) $@ print + +bootsplash.jpg-file := $(call strip_quotes,$(CONFIG_BOOTSPLASH_FILE)) +bootsplash.jpg-type := bootsplash + +################################################################################ +# armv7 specific tools + +################################################################################ +# Common recipes for all stages + +$(objcbfs)/%.bin: $(objcbfs)/%.elf + @printf " OBJCOPY $(subst $(obj)/,,$(@))\n" + $(OBJCOPY) -O binary $< $@ + +$(objcbfs)/%.elf: $(objcbfs)/%.debug + @printf " OBJCOPY $(subst $(obj)/,,$(@))\n" + cp $< 
$@.tmp + $(NM) -n $@.tmp | sort > $(basename $@).map + $(OBJCOPY) --strip-debug $@.tmp + $(OBJCOPY) --add-gnu-debuglink=$< $@.tmp + mv $@.tmp $@ + +################################################################################ +# Build the coreboot_ram (stage 2) + +$(objcbfs)/coreboot_ram.debug: $(objgenerated)/coreboot_ram.o $(src)/arch/armv7/coreboot_ram.ld + @printf " CC $(subst $(obj)/,,$(@))\n" +ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y) + $(LD) -m armelf_linux_eabi -o $@ -L$(obj) $< -T $(src)/arch/armv7/coreboot_ram.ld +else + $(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(src)/arch/armv7/coreboot_ram.ld $< +endif + +$(objgenerated)/coreboot_ram.o: $$(ramstage-objs) $(LIBGCC_FILE_NAME) + @printf " CC $(subst $(obj)/,,$(@))\n" +ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y) + $(LD) -m -m armelf_linux_eabi -r -o $@ --wrap __divdi3 --wrap __udivdi3 --wrap __moddi3 --wrap __umoddi3 --wrap __uidiv --wrap __do_div64 --start-group $(ramstage-objs) $(LIBGCC_FILE_NAME) --end-group +else + $(CC) -nostdlib -r -o $@ -Wl,--start-group $(ramstage-objs) $(LIBGCC_FILE_NAME) -Wl,--end-group +endif + +################################################################################ +# Ramstage for AP CPU (AMD K8, obsolete?) + +#$(objcbfs)/coreboot_ap.debug: $(objgenerated)/coreboot_ap.o $(src)/arch/armv7/init/ldscript_apc.lb +# @printf " CC $(subst $(obj)/,,$(@))\n" +# $(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(src)/arch/armv7/init/ldscript_apc.lb $< + +#$(objgenerated)/coreboot_ap.o: $(src)/mainboard/$(MAINBOARDDIR)/ap_romstage.c $(OPTION_TABLE_H) +# @printf " CC $(subst $(obj)/,,$(@))\n" +# $(CC) -MMD $(CFLAGS) -I$(src) -D__PRE_RAM__ -I. -I$(obj) -c $< -o $@ + +################################################################################ +# done + +# For various headers imported from Linux +CFLAGS += -D__KERNEL__ +CFLAGS += -D__LINUX_ARM_ARCH__=7 +INCLUDES += -Isrc/include/linux +INCLUDES += -Isrc/include/linux/uapi + +# FIXME(dhendrix): trying to split start.S apart... +crt0s = $(src)/arch/armv7/start.S +#crt0s = $(src)/arch/armv7/romstage.S +ldscripts = +ldscripts += $(src)/arch/armv7/romstage.ld + +#crt0s += $(src)/cpu/arm/fpu_enable.inc +# FIXME: CONFIG_NEON or something similar for ARM? +#ifeq ($(CONFIG_SSE),y) +#crt0s += $(src)/cpu/arm/sse_enable.inc +#endif + +crt0s += $(cpu_incs) +crt0s += $(cpu_incs-y) + +ifeq ($(CONFIG_LLSHELL),y) +crt0s += $(src)/arch/armv7/llshell/llshell.inc +endif + +crt0s += $(obj)/mainboard/$(MAINBOARDDIR)/romstage.inc + +$(obj)/mainboard/$(MAINBOARDDIR)/romstage.pre.inc: $(src)/mainboard/$(MAINBOARDDIR)/romstage.c $(OPTION_TABLE_H) $(obj)/build.h $(obj)/config.h + @printf " CC romstage.inc\n" + $(CC) -MMD $(CFLAGS) -D__PRE_RAM__ -I$(src) -I. 
-I$(obj) -c -S $< -o $@ + +$(obj)/mainboard/$(MAINBOARDDIR)/romstage.inc: $(obj)/mainboard/$(MAINBOARDDIR)/romstage.pre.inc + @printf " POST romstage.inc\n" + sed -e 's/\.rodata/.rom.data/g' -e 's/\^\.text/.section .rom.text/g' \ + -e 's/\^\.section \.text/.section .rom.text/g' $^ > $@.tmp + mv $@.tmp $@ + +# Things that appear in every board +romstage-srcs += $(objgenerated)/crt0.s +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/mainboard.c +ifeq ($(CONFIG_GENERATE_PIRQ_TABLE),y) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/irq_tables.c +endif +ifeq ($(CONFIG_BOARD_HAS_HARD_RESET),y) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/reset.c +endif +ifeq ($(CONFIG_GENERATE_ACPI_TABLES),y) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/acpi_tables.c +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/dsdt.asl +# make doesn't have arithmetic operators or greater-than comparisons +ifeq ($(subst 5,4,$(CONFIG_ACPI_SSDTX_NUM)),4) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/ssdt2.asl +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/ssdt3.asl +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/ssdt4.asl +endif +ifeq ($(CONFIG_ACPI_SSDTX_NUM),5) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/ssdt5.asl +endif +ifeq ($(CONFIG_BOARD_HAS_FADT),y) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/fadt.c +endif +endif + +ifeq ($(CONFIG_HAVE_BUS_CONFIG),y) +ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/get_bus_conf.c +endif + +################################################################################ +# Build the final rom image + +$(obj)/coreboot.pre: $(objcbfs)/romstage_xip.elf $(obj)/coreboot.pre1 $(CBFSTOOL) + @printf " CBFS $(subst $(obj)/,,$(@))\n" + cp $(obj)/coreboot.pre1 $@.tmp + $(CBFSTOOL) $@.tmp add-stage \ + -f $(objcbfs)/romstage_xip.elf \ + -n $(CONFIG_CBFS_PREFIX)/romstage -c none \ + -b $(shell cat $(objcbfs)/base_xip.txt) + mv $@.tmp $@ + +################################################################################ +# Build the bootblock + +#bootblock_lds = $(src)/arch/armv7/ldscript_fallback_cbfs.lb +bootblock_lds = $(src)/arch/armv7/lib/id.lds +#bootblock_lds = $(src)/arch/armv7/romstage.ld +bootblock_lds += $(chipset_bootblock_lds) + +bootblock_inc += $(src)/arch/armv7/lib/id.inc +bootblock_inc += $(chipset_bootblock_inc) + +# FIXME: CONFIG_NEON or something similar for ARM? +#ifeq ($(CONFIG_SSE),y) +#bootblock_inc += $(src)/cpu/x86/sse_enable.inc +#endif +#bootblock_inc += $(objgenerated)/bootblock.inc + +$(objgenerated)/bootblock.ld: $$(bootblock_lds) $(obj)/ldoptions + @printf " GEN $(subst $(obj)/,,$(@))\n" + printf '$(foreach ldscript,ldoptions $(bootblock_lds),INCLUDE "$(ldscript)"\n)' > $@ + +$(objgenerated)/bootblock_inc.S: $$(bootblock_inc) + @printf " GEN $(subst $(obj)/,,$(@))\n" + printf '$(foreach crt0,$(bootblock_inc),#include "$(crt0)"\n)' > $@ + +$(objgenerated)/bootblock.o: $(objgenerated)/bootblock.s + @printf " CC $(subst $(obj)/,,$(@))\n" + $(CC) -Wa,-acdlns -c -o $@ $< > $(basename $@).disasm + +$(objgenerated)/bootblock.s: $(objgenerated)/bootblock_inc.S $(obj)/config.h $(obj)/build.h + @printf " CC $(subst $(obj)/,,$(@))\n" + $(CC) -MMD -x assembler-with-cpp -E -I$(src)/include -I$(src)/arch/armv7/include -I$(obj) -include $(obj)/build.h -include $(obj)/config.h -I. 
-I$(src) $< -o $@ + +#$(objgenerated)/bootblock.inc: $(src)/arch/armv7/init/$(subst ",,$(CONFIG_BOOTBLOCK_SOURCE)) $(objutil)/romcc/romcc $(OPTION_TABLE_H) +# @printf " ROMCC $(subst $(obj)/,,$(@))\n" +# $(CC) $(INCLUDES) -MM -MT$(objgenerated)/bootblock.inc \ +# $< > $(objgenerated)/bootblock.inc.d +# $(ROMCC) -c -S $(bootblock_romccflags) $(ROMCCFLAGS) -I. $(INCLUDES) $< -o $@ + +$(objcbfs)/bootblock.debug: $(objgenerated)/bootblock.o $(objgenerated)/bootblock.ld + @printf " LINK $(subst $(obj)/,,$(@))\n" +ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y) + $(LD) -m armelf_linux_eabi -static -o $@.tmp -L$(obj) $< -T $(objgenerated)/bootblock.ld +else + $(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(objgenerated)/bootblock.ld $< +endif + +################################################################################ +# Build the romstage + +# FIXME(dhendrix): added debug printfs +$(objcbfs)/romstage_null.debug: $$(romstage-objs) $(objgenerated)/romstage_null.ld + @printf " LINK $(subst $(obj)/,,$(@))\n" +ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y) + $(LD) -nostdlib -nostartfiles -static -o $@ -L$(obj) $(romstage-objs) -T $(objgenerated)/romstage_null.ld +else + $(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(objgenerated)/romstage_null.ld -Wl,--start-group $(romstage-objs) $(LIBGCC_FILE_NAME) -Wl,--end-group +endif + +$(objcbfs)/romstage_xip.debug: $$(romstage-objs) $(objgenerated)/romstage_xip.ld + @printf " LINK $(subst $(obj)/,,$(@))\n" +ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y) + $(LD) -nostdlib -nostartfiles -static -o $@ -L$(obj) $(romstage-objs) -T $(objgenerated)/romstage_xip.ld +else + $(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(objgenerated)/romstage_xip.ld -Wl,--start-group $(romstage-objs) $(LIBGCC_FILE_NAME) -Wl,--end-group +endif + +$(objgenerated)/romstage_null.ld: $$(ldscripts) $(obj)/ldoptions + @printf " GEN $(subst $(obj)/,,$(@))\n" + rm -f $@ + printf "ROMSTAGE_BASE = 0x0;\n" > $@.tmp + printf '$(foreach ldscript,ldoptions $(ldscripts),INCLUDE "$(ldscript:$(obj)/%=%)"\n)' >> $@.tmp + mv $@.tmp $@ + +$(objgenerated)/romstage_xip.ld: $(objgenerated)/romstage_null.ld $(objcbfs)/base_xip.txt + @printf " GEN $(subst $(obj)/,,$(@))\n" + rm -f $@ + sed -e 's/^/ROMSTAGE_BASE = /g' -e 's/$$/;/g' $(objcbfs)/base_xip.txt > $@.tmp + sed -e '/ROMSTAGE_BASE/d' $(objgenerated)/romstage_null.ld >> $@.tmp + mv $@.tmp $@ + +$(objcbfs)/base_xip.txt: $(obj)/coreboot.pre1 $(objcbfs)/romstage_null.bin + @printf " generating base_xip.txt\n" + rm -f $@ + $(CBFSTOOL) $(obj)/coreboot.pre1 locate -f $(objcbfs)/romstage_null.bin -n $(CONFIG_CBFS_PREFIX)/romstage -a $(CONFIG_XIP_ROM_SIZE) > $@.tmp \ + || { echo "The romstage is larger than XIP size. Please expand the CONFIG_XIP_ROM_SIZE" ; exit 1; } + sed -e 's/^/0x/g' $@.tmp > $@.tmp2 + rm $@.tmp + mv $@.tmp2 $@ + +$(objgenerated)/crt0.romstage.S: $$(crt0s) + @printf " GEN $(subst $(obj)/,,$(@))\n" + printf '$(foreach crt0,$(crt0s),#include "$(crt0:$(obj)/%=%)"\n)' > $@ + +$(objgenerated)/crt0.romstage.o: $(objgenerated)/crt0.s + @printf " CC $(subst $(obj)/,,$(@))\n" + $(CC) -Wa,-acdlns -c -o $@ $< > $(basename $@).disasm + +$(objgenerated)/crt0.s: $(objgenerated)/crt0.romstage.S $(obj)/config.h $(obj)/build.h + @printf " CC $(subst $(obj)/,,$(@))\n" + $(CC) -MMD -x assembler-with-cpp -E -I$(src)/include -I$(src)/arch/armv7/include -I$(obj) -include $(obj)/config.h -include $(obj)/build.h -I. 
-I$(src) $< -o $@ + diff --git a/src/arch/armv7/boot/Makefile.inc b/src/arch/armv7/boot/Makefile.inc new file mode 100644 index 0000000000..a0752d6722 --- /dev/null +++ b/src/arch/armv7/boot/Makefile.inc @@ -0,0 +1,13 @@ +ramstage-y += boot.c +ramstage-y += coreboot_table.c +#ramstage-$(CONFIG_MULTIBOOT) += multiboot.c +ramstage-y += tables.c +#ramstage-$(CONFIG_GENERATE_ACPI_TABLES) += acpi.c +#ramstage-$(CONFIG_GENERATE_ACPI_TABLES) += acpigen.c +#ramstage-$(CONFIG_HAVE_ACPI_RESUME) += wakeup.S + +#FIXME(dhendrix): is there anything preventing multiboot from +#working on ARM? + +$(obj)/arch/armv7/boot/coreboot_table.ramstage.o : $(OPTION_TABLE_H) +#$(obj)/arch/x86/boot/smbios.ramstage.o: $(obj)/build.h diff --git a/src/arch/armv7/boot/acpi.c b/src/arch/armv7/boot/acpi.c new file mode 100644 index 0000000000..4d405d9fe0 --- /dev/null +++ b/src/arch/armv7/boot/acpi.c @@ -0,0 +1,791 @@ +/* + * This file is part of the coreboot project. + * + * coreboot ACPI Table support + * written by Stefan Reinauer + * + * Copyright (C) 2004 SUSE LINUX AG + * Copyright (C) 2005-2009 coresystems GmbH + * + * ACPI FADT, FACS, and DSDT table support added by + * Nick Barker , and those portions + * Copyright (C) 2004 Nick Barker + * + * Copyright (C) 2005 ADVANCED MICRO DEVICES, INC. All Rights Reserved. + * 2005.9 yhlu add SRAT table generation + */ + +/* + * Each system port implementing ACPI has to provide two functions: + * + * write_acpi_tables() + * acpi_dump_apics() + * + * See Kontron 986LCD-M port for a good example of an ACPI implementation + * in coreboot. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#if CONFIG_COLLECT_TIMESTAMPS +#include +#endif + +/* FIXME: Kconfig doesn't support overridable defaults :-( */ +#ifndef CONFIG_HPET_MIN_TICKS +#define CONFIG_HPET_MIN_TICKS 0x1000 +#endif + +u8 acpi_checksum(u8 *table, u32 length) +{ + u8 ret = 0; + while (length--) { + ret += *table; + table++; + } + return -ret; +} + +/** + * Add an ACPI table to the RSDT (and XSDT) structure, recalculate length + * and checksum. + */ +void acpi_add_table(acpi_rsdp_t *rsdp, void *table) +{ + int i, entries_num; + acpi_rsdt_t *rsdt; + acpi_xsdt_t *xsdt = NULL; + + /* The RSDT is mandatory... */ + rsdt = (acpi_rsdt_t *)rsdp->rsdt_address; + + /* ...while the XSDT is not. */ + if (rsdp->xsdt_address) + xsdt = (acpi_xsdt_t *)((u32)rsdp->xsdt_address); + + /* This should always be MAX_ACPI_TABLES. */ + entries_num = ARRAY_SIZE(rsdt->entry); + + for (i = 0; i < entries_num; i++) { + if (rsdt->entry[i] == 0) + break; + } + + if (i >= entries_num) { + printk(BIOS_ERR, "ACPI: Error: Could not add ACPI table, " + "too many tables.\n"); + return; + } + + /* Add table to the RSDT. */ + rsdt->entry[i] = (u32)table; + + /* Fix RSDT length or the kernel will assume invalid entries. */ + rsdt->header.length = sizeof(acpi_header_t) + (sizeof(u32) * (i + 1)); + + /* Re-calculate checksum. */ + rsdt->header.checksum = 0; /* Hope this won't get optimized away */ + rsdt->header.checksum = acpi_checksum((u8 *)rsdt, rsdt->header.length); + + /* + * And now the same thing for the XSDT. We use the same index as for + * now we want the XSDT and RSDT to always be in sync in coreboot. + */ + if (xsdt) { + /* Add table to the XSDT. */ + xsdt->entry[i] = (u64)(u32)table; + + /* Fix XSDT length. */ + xsdt->header.length = sizeof(acpi_header_t) + + (sizeof(u64) * (i + 1)); + + /* Re-calculate checksum. 
*/ + xsdt->header.checksum = 0; + xsdt->header.checksum = acpi_checksum((u8 *)xsdt, + xsdt->header.length); + } + + printk(BIOS_DEBUG, "ACPI: added table %d/%d, length now %d\n", + i + 1, entries_num, rsdt->header.length); +} + +int acpi_create_mcfg_mmconfig(acpi_mcfg_mmconfig_t *mmconfig, u32 base, + u16 seg_nr, u8 start, u8 end) +{ + mmconfig->base_address = base; + mmconfig->base_reserved = 0; + mmconfig->pci_segment_group_number = seg_nr; + mmconfig->start_bus_number = start; + mmconfig->end_bus_number = end; + + return sizeof(acpi_mcfg_mmconfig_t); +} + +int acpi_create_madt_lapic(acpi_madt_lapic_t *lapic, u8 cpu, u8 apic) +{ + lapic->type = 0; /* Local APIC structure */ + lapic->length = sizeof(acpi_madt_lapic_t); + lapic->flags = (1 << 0); /* Processor/LAPIC enabled */ + lapic->processor_id = cpu; + lapic->apic_id = apic; + + return lapic->length; +} + +unsigned long acpi_create_madt_lapics(unsigned long current) +{ + device_t cpu; + int index = 0; + + for (cpu = all_devices; cpu; cpu = cpu->next) { + if ((cpu->path.type != DEVICE_PATH_APIC) || + (cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER)) { + continue; + } + if (!cpu->enabled) + continue; + current += acpi_create_madt_lapic((acpi_madt_lapic_t *)current, + index, cpu->path.apic.apic_id); + index++; + } + + return current; +} + +int acpi_create_madt_ioapic(acpi_madt_ioapic_t *ioapic, u8 id, u32 addr, + u32 gsi_base) +{ + ioapic->type = 1; /* I/O APIC structure */ + ioapic->length = sizeof(acpi_madt_ioapic_t); + ioapic->reserved = 0x00; + ioapic->gsi_base = gsi_base; + ioapic->ioapic_id = id; + ioapic->ioapic_addr = addr; + + return ioapic->length; +} + +int acpi_create_madt_irqoverride(acpi_madt_irqoverride_t *irqoverride, + u8 bus, u8 source, u32 gsirq, u16 flags) +{ + irqoverride->type = 2; /* Interrupt source override */ + irqoverride->length = sizeof(acpi_madt_irqoverride_t); + irqoverride->bus = bus; + irqoverride->source = source; + irqoverride->gsirq = gsirq; + irqoverride->flags = flags; + + return irqoverride->length; +} + +int acpi_create_madt_lapic_nmi(acpi_madt_lapic_nmi_t *lapic_nmi, u8 cpu, + u16 flags, u8 lint) +{ + lapic_nmi->type = 4; /* Local APIC NMI structure */ + lapic_nmi->length = sizeof(acpi_madt_lapic_nmi_t); + lapic_nmi->flags = flags; + lapic_nmi->processor_id = cpu; + lapic_nmi->lint = lint; + + return lapic_nmi->length; +} + +void acpi_create_madt(acpi_madt_t *madt) +{ + acpi_header_t *header = &(madt->header); + unsigned long current = (unsigned long)madt + sizeof(acpi_madt_t); + + memset((void *)madt, 0, sizeof(acpi_madt_t)); + + /* Fill out header fields. */ + memcpy(header->signature, "APIC", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_madt_t); + header->revision = 1; /* ACPI 1.0/2.0: 1, ACPI 3.0: 2, ACPI 4.0: 3 */ + + madt->lapic_addr = LOCAL_APIC_ADDR; + madt->flags = 0x1; /* PCAT_COMPAT */ + + current = acpi_fill_madt(current); + + /* (Re)calculate length and checksum. */ + header->length = current - (unsigned long)madt; + + header->checksum = acpi_checksum((void *)madt, header->length); +} + +/* MCFG is defined in the PCI Firmware Specification 3.0. */ +void acpi_create_mcfg(acpi_mcfg_t *mcfg) +{ + acpi_header_t *header = &(mcfg->header); + unsigned long current = (unsigned long)mcfg + sizeof(acpi_mcfg_t); + + memset((void *)mcfg, 0, sizeof(acpi_mcfg_t)); + + /* Fill out header fields. 
*/ + memcpy(header->signature, "MCFG", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_mcfg_t); + header->revision = 1; + + current = acpi_fill_mcfg(current); + + /* (Re)calculate length and checksum. */ + header->length = current - (unsigned long)mcfg; + header->checksum = acpi_checksum((void *)mcfg, header->length); +} + +/* + * This can be overriden by platform ACPI setup code, if it calls + * acpi_create_ssdt_generator(). + */ +unsigned long __attribute__((weak)) acpi_fill_ssdt_generator( + unsigned long current, const char *oem_table_id) +{ + return current; +} + +void acpi_create_ssdt_generator(acpi_header_t *ssdt, const char *oem_table_id) +{ + unsigned long current = (unsigned long)ssdt + sizeof(acpi_header_t); + + memset((void *)ssdt, 0, sizeof(acpi_header_t)); + + memcpy(&ssdt->signature, "SSDT", 4); + ssdt->revision = 2; /* ACPI 1.0/2.0: ?, ACPI 3.0/4.0: 2 */ + memcpy(&ssdt->oem_id, OEM_ID, 6); + memcpy(&ssdt->oem_table_id, oem_table_id, 8); + ssdt->oem_revision = 42; + memcpy(&ssdt->asl_compiler_id, ASLC, 4); + ssdt->asl_compiler_revision = 42; + ssdt->length = sizeof(acpi_header_t); + + acpigen_set_current((char *) current); + current = acpi_fill_ssdt_generator(current, oem_table_id); + + /* (Re)calculate length and checksum. */ + ssdt->length = current - (unsigned long)ssdt; + ssdt->checksum = acpi_checksum((void *)ssdt, ssdt->length); +} + +int acpi_create_srat_lapic(acpi_srat_lapic_t *lapic, u8 node, u8 apic) +{ + memset((void *)lapic, 0, sizeof(acpi_srat_lapic_t)); + + lapic->type = 0; /* Processor local APIC/SAPIC affinity structure */ + lapic->length = sizeof(acpi_srat_lapic_t); + lapic->flags = (1 << 0); /* Enabled (the use of this structure). */ + lapic->proximity_domain_7_0 = node; + /* TODO: proximity_domain_31_8, local SAPIC EID, clock domain. */ + lapic->apic_id = apic; + + return lapic->length; +} + +int acpi_create_srat_mem(acpi_srat_mem_t *mem, u8 node, u32 basek, u32 sizek, + u32 flags) +{ + mem->type = 1; /* Memory affinity structure */ + mem->length = sizeof(acpi_srat_mem_t); + mem->base_address_low = (basek << 10); + mem->base_address_high = (basek >> (32 - 10)); + mem->length_low = (sizek << 10); + mem->length_high = (sizek >> (32 - 10)); + mem->proximity_domain = node; + mem->flags = flags; + + return mem->length; +} + +/* http://www.microsoft.com/whdc/system/sysinternals/sratdwn.mspx */ +void acpi_create_srat(acpi_srat_t *srat) +{ + acpi_header_t *header = &(srat->header); + unsigned long current = (unsigned long)srat + sizeof(acpi_srat_t); + + memset((void *)srat, 0, sizeof(acpi_srat_t)); + + /* Fill out header fields. */ + memcpy(header->signature, "SRAT", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_srat_t); + header->revision = 1; /* ACPI 1.0: N/A, 2.0: 1, 3.0: 2, 4.0: 3 */ + + srat->resv = 1; /* Spec: Reserved to 1 for backwards compatibility. */ + + current = acpi_fill_srat(current); + + /* (Re)calculate length and checksum. 
*/ + header->length = current - (unsigned long)srat; + header->checksum = acpi_checksum((void *)srat, header->length); +} + +unsigned long __attribute__((weak)) acpi_fill_dmar(unsigned long current) +{ + return current; +} + +void acpi_create_dmar(acpi_dmar_t *dmar) +{ + acpi_header_t *header = &(dmar->header); + unsigned long current = (unsigned long)dmar + sizeof(acpi_dmar_t); + + memset((void *)dmar, 0, sizeof(acpi_dmar_t)); + + /* Fill out header fields. */ + memcpy(header->signature, "DMAR", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_dmar_t); + header->revision = 1; + + dmar->host_address_width = 40 - 1; /* FIXME: == MTRR size? */ + dmar->flags = 0; + + current = acpi_fill_dmar(current); + + /* (Re)calculate length and checksum. */ + header->length = current - (unsigned long)dmar; + header->checksum = acpi_checksum((void *)dmar, header->length); +} + +unsigned long acpi_create_dmar_drhd(unsigned long current, u8 flags, + u16 segment, u32 bar) +{ + dmar_entry_t *drhd = (dmar_entry_t *)current; + memset(drhd, 0, sizeof(*drhd)); + drhd->type = DMAR_DRHD; + drhd->length = sizeof(*drhd); /* will be fixed up later */ + drhd->flags = flags; + drhd->segment = segment; + drhd->bar = bar; + + return drhd->length; +} + +void acpi_dmar_drhd_fixup(unsigned long base, unsigned long current) +{ + dmar_entry_t *drhd = (dmar_entry_t *)base; + drhd->length = current - base; +} + +unsigned long acpi_create_dmar_drhd_ds_pci(unsigned long current, u8 segment, + u8 dev, u8 fn) +{ + dev_scope_t *ds = (dev_scope_t *)current; + memset(ds, 0, sizeof(*ds)); + ds->type = SCOPE_PCI_ENDPOINT; + ds->length = sizeof(*ds) + 2; /* we don't support longer paths yet */ + ds->start_bus = segment; + ds->path[0].dev = dev; + ds->path[0].fn = fn; + + return ds->length; +} + +/* http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/slit.pdf */ +void acpi_create_slit(acpi_slit_t *slit) +{ + acpi_header_t *header = &(slit->header); + unsigned long current = (unsigned long)slit + sizeof(acpi_slit_t); + + memset((void *)slit, 0, sizeof(acpi_slit_t)); + + /* Fill out header fields. */ + memcpy(header->signature, "SLIT", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_slit_t); + header->revision = 1; /* ACPI 1.0: N/A, ACPI 2.0/3.0/4.0: 1 */ + + current = acpi_fill_slit(current); + + /* (Re)calculate length and checksum. */ + header->length = current - (unsigned long)slit; + header->checksum = acpi_checksum((void *)slit, header->length); +} + +/* http://www.intel.com/hardwaredesign/hpetspec_1.pdf */ +void acpi_create_hpet(acpi_hpet_t *hpet) +{ + acpi_header_t *header = &(hpet->header); + acpi_addr_t *addr = &(hpet->addr); + + memset((void *)hpet, 0, sizeof(acpi_hpet_t)); + + /* Fill out header fields. */ + memcpy(header->signature, "HPET", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_hpet_t); + header->revision = 1; /* Currently 1. Table added in ACPI 2.0. */ + + /* Fill out HPET address. 
*/ + addr->space_id = 0; /* Memory */ + addr->bit_width = 64; + addr->bit_offset = 0; + addr->addrl = CONFIG_HPET_ADDRESS & 0xffffffff; + addr->addrh = ((unsigned long long)CONFIG_HPET_ADDRESS) >> 32; + + hpet->id = *(unsigned int*)CONFIG_HPET_ADDRESS; + hpet->number = 0; + hpet->min_tick = CONFIG_HPET_MIN_TICKS; + + header->checksum = acpi_checksum((void *)hpet, sizeof(acpi_hpet_t)); +} + +void acpi_create_facs(acpi_facs_t *facs) +{ + memset((void *)facs, 0, sizeof(acpi_facs_t)); + + memcpy(facs->signature, "FACS", 4); + facs->length = sizeof(acpi_facs_t); + facs->hardware_signature = 0; + facs->firmware_waking_vector = 0; + facs->global_lock = 0; + facs->flags = 0; + facs->x_firmware_waking_vector_l = 0; + facs->x_firmware_waking_vector_h = 0; + facs->version = 1; /* ACPI 1.0: 0, ACPI 2.0/3.0: 1, ACPI 4.0: 2 */ +} + +void acpi_write_rsdt(acpi_rsdt_t *rsdt) +{ + acpi_header_t *header = &(rsdt->header); + + /* Fill out header fields. */ + memcpy(header->signature, "RSDT", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_rsdt_t); + header->revision = 1; /* ACPI 1.0/2.0/3.0/4.0: 1 */ + + /* Entries are filled in later, we come with an empty set. */ + + /* Fix checksum. */ + header->checksum = acpi_checksum((void *)rsdt, sizeof(acpi_rsdt_t)); +} + +void acpi_write_xsdt(acpi_xsdt_t *xsdt) +{ + acpi_header_t *header = &(xsdt->header); + + /* Fill out header fields. */ + memcpy(header->signature, "XSDT", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + + header->length = sizeof(acpi_xsdt_t); + header->revision = 1; /* ACPI 1.0: N/A, 2.0/3.0/4.0: 1 */ + + /* Entries are filled in later, we come with an empty set. */ + + /* Fix checksum. */ + header->checksum = acpi_checksum((void *)xsdt, sizeof(acpi_xsdt_t)); +} + +void acpi_write_rsdp(acpi_rsdp_t *rsdp, acpi_rsdt_t *rsdt, acpi_xsdt_t *xsdt) +{ + memset(rsdp, 0, sizeof(acpi_rsdp_t)); + + memcpy(rsdp->signature, RSDP_SIG, 8); + memcpy(rsdp->oem_id, OEM_ID, 6); + + rsdp->length = sizeof(acpi_rsdp_t); + rsdp->rsdt_address = (u32)rsdt; + + /* + * Revision: ACPI 1.0: 0, ACPI 2.0/3.0/4.0: 2. + * + * Some OSes expect an XSDT to be present for RSD PTR revisions >= 2. + * If we don't have an ACPI XSDT, force ACPI 1.0 (and thus RSD PTR + * revision 0). + */ + if (xsdt == NULL) { + rsdp->revision = 0; + } else { + rsdp->xsdt_address = (u64)(u32)xsdt; + rsdp->revision = 2; + } + + /* Calculate checksums. */ + rsdp->checksum = acpi_checksum((void *)rsdp, 20); + rsdp->ext_checksum = acpi_checksum((void *)rsdp, sizeof(acpi_rsdp_t)); +} + +unsigned long __attribute__((weak)) acpi_fill_hest(acpi_hest_t *hest) +{ + return (unsigned long)hest; +} + +unsigned long acpi_create_hest_error_source(acpi_hest_t *hest, acpi_hest_esd_t *esd, u16 type, void *data, u16 data_len) +{ + acpi_header_t *header = &(hest->header); + acpi_hest_hen_t *hen; + void *pos; + u16 len; + + pos = esd; + memset(pos, 0, sizeof(acpi_hest_esd_t)); + len = 0; + esd->type = type; /* MCE */ + esd->source_id = hest->error_source_count; + esd->flags = 0; /* FIRMWARE_FIRST */ + esd->enabled = 1; + esd->prealloc_erecords = 1; + esd->max_section_per_record = 0x1; + + len += sizeof(acpi_hest_esd_t); + pos = esd + 1; + + switch (type) { + case 0: /* MCE */ + break; + case 1: /* CMC */ + hen = (acpi_hest_hen_t *) (pos); + memset(pos, 0, sizeof(acpi_hest_hen_t)); + hen->type = 3; /* SCI? 
*/ + hen->length = sizeof(acpi_hest_hen_t); + hen->conf_we = 0; /* Configuration Write Enable. */ + hen->poll_interval = 0; + hen->vector = 0; + hen->sw2poll_threshold_val = 0; + hen->sw2poll_threshold_win = 0; + hen->error_threshold_val = 0; + hen->error_threshold_win = 0; + len += sizeof(acpi_hest_hen_t); + pos = hen + 1; + break; + case 2: /* NMI */ + case 6: /* AER Root Port */ + case 7: /* AER Endpoint */ + case 8: /* AER Bridge */ + case 9: /* Generic Hardware Error Source. */ + /* TODO: */ + break; + default: + printk(BIOS_DEBUG, "Invalid type of Error Source."); + break; + } + hest->error_source_count ++; + + memcpy(pos, data, data_len); + len += data_len; + header->length += len; + + return len; +} + +/* ACPI 4.0 */ +void acpi_write_hest(acpi_hest_t *hest) +{ + acpi_header_t *header = &(hest->header); + + memset(hest, 0, sizeof(acpi_hest_t)); + + memcpy(header->signature, "HEST", 4); + memcpy(header->oem_id, OEM_ID, 6); + memcpy(header->oem_table_id, ACPI_TABLE_CREATOR, 8); + memcpy(header->asl_compiler_id, ASLC, 4); + header->length += sizeof(acpi_hest_t); + header->revision = 1; + + acpi_fill_hest(hest); + + /* Calculate checksums. */ + header->checksum = acpi_checksum((void *)hest, header->length); +} + +#if CONFIG_HAVE_ACPI_RESUME +void suspend_resume(void) +{ + void *wake_vec; + + /* If we happen to be resuming find wakeup vector and jump to OS. */ + wake_vec = acpi_find_wakeup_vector(); + if (wake_vec) { +#if CONFIG_HAVE_SMI_HANDLER + u32 *gnvs_address = cbmem_find(CBMEM_ID_ACPI_GNVS); + + /* Restore GNVS pointer in SMM if found */ + if (gnvs_address && *gnvs_address) { + printk(BIOS_DEBUG, "Restore GNVS pointer to 0x%08x\n", + *gnvs_address); + smm_setup_structures((void *)*gnvs_address, NULL, NULL); + } +#endif + + /* Call mainboard resume handler first, if defined. */ + if (mainboard_suspend_resume) + mainboard_suspend_resume(); + post_code(POST_OS_RESUME); + acpi_jump_to_wakeup(wake_vec); + } +} + +/* This is to be filled by SB code - startup value what was found. */ +u8 acpi_slp_type = 0; + +static int acpi_is_wakeup(void) +{ + /* Both resume from S2 and resume from S3 restart at CPU reset */ + return (acpi_slp_type == 3 || acpi_slp_type == 2); +} + +static acpi_rsdp_t *valid_rsdp(acpi_rsdp_t *rsdp) +{ + if (strncmp((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) + return NULL; + + printk(BIOS_DEBUG, "Looking on %p for valid checksum\n", rsdp); + + if (acpi_checksum((void *)rsdp, 20) != 0) + return NULL; + printk(BIOS_DEBUG, "Checksum 1 passed\n"); + + if ((rsdp->revision > 1) && (acpi_checksum((void *)rsdp, + rsdp->length) != 0)) + return NULL; + printk(BIOS_DEBUG, "Checksum 2 passed all OK\n"); + + return rsdp; +} + +static acpi_rsdp_t *rsdp; + +void *acpi_get_wakeup_rsdp(void) +{ + return rsdp; +} + +void *acpi_find_wakeup_vector(void) +{ + char *p, *end; + acpi_rsdt_t *rsdt; + acpi_facs_t *facs; + acpi_fadt_t *fadt; + void *wake_vec; + int i; + + rsdp = NULL; + + if (!acpi_is_wakeup()) + return NULL; + + printk(BIOS_DEBUG, "Trying to find the wakeup vector...\n"); + + /* Find RSDP. 
*/ + for (p = (char *)0xe0000; p < (char *)0xfffff; p += 16) { + if ((rsdp = valid_rsdp((acpi_rsdp_t *)p))) + break; + } + + if (rsdp == NULL) + return NULL; + + printk(BIOS_DEBUG, "RSDP found at %p\n", rsdp); + rsdt = (acpi_rsdt_t *) rsdp->rsdt_address; + + end = (char *)rsdt + rsdt->header.length; + printk(BIOS_DEBUG, "RSDT found at %p ends at %p\n", rsdt, end); + + for (i = 0; ((char *)&rsdt->entry[i]) < end; i++) { + fadt = (acpi_fadt_t *)rsdt->entry[i]; + if (strncmp((char *)fadt, "FACP", 4) == 0) + break; + fadt = NULL; + } + + if (fadt == NULL) + return NULL; + + printk(BIOS_DEBUG, "FADT found at %p\n", fadt); + facs = (acpi_facs_t *)fadt->firmware_ctrl; + + if (facs == NULL) { + printk(BIOS_DEBUG, "No FACS found, wake up from S3 not " + "possible.\n"); + return NULL; + } + + printk(BIOS_DEBUG, "FACS found at %p\n", facs); + wake_vec = (void *)facs->firmware_waking_vector; + printk(BIOS_DEBUG, "OS waking vector is %p\n", wake_vec); + + return wake_vec; +} + +#if CONFIG_SMP +extern char *lowmem_backup; +extern char *lowmem_backup_ptr; +extern int lowmem_backup_size; +#endif + +#define WAKEUP_BASE 0x600 + +void (*acpi_do_wakeup)(u32 vector, u32 backup_source, u32 backup_target, + u32 backup_size) __attribute__((regparm(0))) = (void *)WAKEUP_BASE; + +extern unsigned char __wakeup, __wakeup_size; + +void acpi_jump_to_wakeup(void *vector) +{ + u32 acpi_backup_memory = (u32)cbmem_find(CBMEM_ID_RESUME); + + if (!acpi_backup_memory) { + printk(BIOS_WARNING, "ACPI: Backup memory missing. " + "No S3 resume.\n"); + return; + } + +#if CONFIG_SMP + // FIXME: This should go into the ACPI backup memory, too. No pork saussages. + /* + * Just restore the SMP trampoline and continue with wakeup on + * assembly level. + */ + memcpy(lowmem_backup_ptr, lowmem_backup, lowmem_backup_size); +#endif + + /* Copy wakeup trampoline in place. */ + memcpy((void *)WAKEUP_BASE, &__wakeup, (size_t)&__wakeup_size); + +#if CONFIG_COLLECT_TIMESTAMPS + timestamp_add_now(TS_ACPI_WAKE_JUMP); +#endif + + acpi_do_wakeup((u32)vector, acpi_backup_memory, CONFIG_RAMBASE, + HIGH_MEMORY_SAVE); +} +#endif + +void acpi_save_gnvs(u32 gnvs_address) +{ + u32 *gnvs = cbmem_add(CBMEM_ID_ACPI_GNVS, sizeof(*gnvs)); + if (gnvs) + *gnvs = gnvs_address; +} diff --git a/src/arch/armv7/boot/acpigen.c b/src/arch/armv7/boot/acpigen.c new file mode 100644 index 0000000000..47845a01a6 --- /dev/null +++ b/src/arch/armv7/boot/acpigen.c @@ -0,0 +1,729 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2009 Rudolf Marek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* how many nesting we support */ +#define ACPIGEN_LENSTACK_SIZE 10 + +/* if you need to change this, change the acpigen_write_f and + acpigen_patch_len */ + +#define ACPIGEN_MAXLEN 0xfff + +#include +#include +#include +#include + +static char *gencurrent; + +char *len_stack[ACPIGEN_LENSTACK_SIZE]; +int ltop = 0; + +int acpigen_write_len_f(void) +{ + ASSERT(ltop < (ACPIGEN_LENSTACK_SIZE - 1)) + len_stack[ltop++] = gencurrent; + acpigen_emit_byte(0); + acpigen_emit_byte(0); + return 2; +} + +void acpigen_patch_len(int len) +{ + ASSERT(len <= ACPIGEN_MAXLEN) + ASSERT(ltop > 0) + char *p = len_stack[--ltop]; + /* generate store length for 0xfff max */ + p[0] = (0x40 | (len & 0xf)); + p[1] = (len >> 4 & 0xff); + +} + +void acpigen_set_current(char *curr) +{ + gencurrent = curr; +} + +char *acpigen_get_current(void) +{ + return gencurrent; +} + +int acpigen_emit_byte(unsigned char b) +{ + (*gencurrent++) = b; + return 1; +} + +int acpigen_write_package(int nr_el) +{ + int len; + /* package op */ + acpigen_emit_byte(0x12); + len = acpigen_write_len_f(); + acpigen_emit_byte(nr_el); + return len + 2; +} + +int acpigen_write_byte(unsigned int data) +{ + /* byte op */ + acpigen_emit_byte(0xa); + acpigen_emit_byte(data & 0xff); + return 2; +} + +int acpigen_write_dword(unsigned int data) +{ + /* dword op */ + acpigen_emit_byte(0xc); + acpigen_emit_byte(data & 0xff); + acpigen_emit_byte((data >> 8) & 0xff); + acpigen_emit_byte((data >> 16) & 0xff); + acpigen_emit_byte((data >> 24) & 0xff); + return 5; +} + +int acpigen_write_qword(uint64_t data) +{ + /* qword op */ + acpigen_emit_byte(0xe); + acpigen_emit_byte(data & 0xff); + acpigen_emit_byte((data >> 8) & 0xff); + acpigen_emit_byte((data >> 16) & 0xff); + acpigen_emit_byte((data >> 24) & 0xff); + acpigen_emit_byte((data >> 32) & 0xff); + acpigen_emit_byte((data >> 40) & 0xff); + acpigen_emit_byte((data >> 48) & 0xff); + acpigen_emit_byte((data >> 56) & 0xff); + return 9; +} + +int acpigen_write_name_byte(const char *name, uint8_t val) +{ + int len; + len = acpigen_write_name(name); + len += acpigen_write_byte(val); + return len; +} + +int acpigen_write_name_dword(const char *name, uint32_t val) +{ + int len; + len = acpigen_write_name(name); + len += acpigen_write_dword(val); + return len; +} + +int acpigen_write_name_qword(const char *name, uint64_t val) +{ + int len; + len = acpigen_write_name(name); + len += acpigen_write_qword(val); + return len; +} + +int acpigen_emit_stream(const char *data, int size) +{ + int i; + for (i = 0; i < size; i++) { + acpigen_emit_byte(data[i]); + } + return size; +} + +/* The NameString are bit tricky, each element can be 4 chars, if + less its padded with underscore. 
Check 18.2.2 and 18.4 + and 5.3 of ACPI specs 3.0 for details +*/ + +static int acpigen_emit_simple_namestring(const char *name) { + int i, len = 0; + char ud[] = "____"; + for (i = 0; i < 4; i++) { + if ((name[i] == '\0') || (name[i] == '.')) { + len += acpigen_emit_stream(ud, 4 - i); + break; + } else { + len += acpigen_emit_byte(name[i]); + } + } + return len; +} + +static int acpigen_emit_double_namestring(const char *name, int dotpos) { + int len = 0; + /* mark dual name prefix */ + len += acpigen_emit_byte(0x2e); + len += acpigen_emit_simple_namestring(name); + len += acpigen_emit_simple_namestring(&name[dotpos + 1]); + return len; +} + +static int acpigen_emit_multi_namestring(const char *name) { + int len = 0, count = 0; + unsigned char *pathlen; + /* mark multi name prefix */ + len += acpigen_emit_byte(0x2f); + len += acpigen_emit_byte(0x0); + pathlen = ((unsigned char *) acpigen_get_current()) - 1; + + while (name[0] != '\0') { + len += acpigen_emit_simple_namestring(name); + /* find end or next entity */ + while ((name[0] != '.') && (name[0] != '\0')) + name++; + /* forward to next */ + if (name[0] == '.') + name++; + count++; + } + + pathlen[0] = count; + return len; +} + + +int acpigen_emit_namestring(const char *namepath) { + int dotcount = 0, i; + int dotpos = 0; + int len = 0; + + /* we can start with a \ */ + if (namepath[0] == '\\') { + len += acpigen_emit_byte('\\'); + namepath++; + } + + /* and there can be any number of ^ */ + while (namepath[0] == '^') { + len += acpigen_emit_byte('^'); + namepath++; + } + + ASSERT(namepath[0] != '\0'); + + i = 0; + while (namepath[i] != '\0') { + if (namepath[i] == '.') { + dotcount++; + dotpos = i; + } + i++; + } + + if (dotcount == 0) { + len += acpigen_emit_simple_namestring(namepath); + } else if (dotcount == 1) { + len += acpigen_emit_double_namestring(namepath, dotpos); + } else { + len += acpigen_emit_multi_namestring(namepath); + } + return len; +} + +int acpigen_write_name(const char *name) +{ + int len; + /* name op */ + len = acpigen_emit_byte(0x8); + return len + acpigen_emit_namestring(name); +} + +int acpigen_write_scope(const char *name) +{ + int len; + /* scope op */ + len = acpigen_emit_byte(0x10); + len += acpigen_write_len_f(); + return len + acpigen_emit_namestring(name); +} + +int acpigen_write_processor(u8 cpuindex, u32 pblock_addr, u8 pblock_len) +{ +/* + Processor (\_PR.CPUcpuindex, cpuindex, pblock_addr, pblock_len) + { +*/ + char pscope[16]; + int len; + /* processor op */ + acpigen_emit_byte(0x5b); + acpigen_emit_byte(0x83); + len = acpigen_write_len_f(); + + sprintf(pscope, "\\_PR.CPU%x", (unsigned int) cpuindex); + len += acpigen_emit_namestring(pscope); + acpigen_emit_byte(cpuindex); + acpigen_emit_byte(pblock_addr & 0xff); + acpigen_emit_byte((pblock_addr >> 8) & 0xff); + acpigen_emit_byte((pblock_addr >> 16) & 0xff); + acpigen_emit_byte((pblock_addr >> 24) & 0xff); + acpigen_emit_byte(pblock_len); + return 6 + 2 + len; +} + +int acpigen_write_empty_PCT(void) +{ +/* + Name (_PCT, Package (0x02) + { + ResourceTemplate () + { + Register (FFixedHW, + 0x00, // Bit Width + 0x00, // Bit Offset + 0x0000000000000000, // Address + ,) + }, + + ResourceTemplate () + { + Register (FFixedHW, + 0x00, // Bit Width + 0x00, // Bit Offset + 0x0000000000000000, // Address + ,) + } + }) +*/ + static char stream[] = { + 0x08, 0x5F, 0x50, 0x43, 0x54, 0x12, 0x2C, /* 00000030 "0._PCT.," */ + 0x02, 0x11, 0x14, 0x0A, 0x11, 0x82, 0x0C, 0x00, /* 00000038 "........" 
*/ + 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00000040 "........" */ + 0x00, 0x00, 0x00, 0x00, 0x79, 0x00, 0x11, 0x14, /* 00000048 "....y..." */ + 0x0A, 0x11, 0x82, 0x0C, 0x00, 0x7F, 0x00, 0x00, /* 00000050 "........" */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00000058 "........" */ + 0x00, 0x79, 0x00 + }; + return acpigen_emit_stream(stream, ARRAY_SIZE(stream)); +} + +int acpigen_write_empty_PTC(void) +{ +/* + Name (_PTC, Package (0x02) + { + ResourceTemplate () + { + Register (FFixedHW, + 0x00, // Bit Width + 0x00, // Bit Offset + 0x0000000000000000, // Address + ,) + }, + + ResourceTemplate () + { + Register (FFixedHW, + 0x00, // Bit Width + 0x00, // Bit Offset + 0x0000000000000000, // Address + ,) + } + }) +*/ + int len, nlen, rlen; + acpi_addr_t addr = { + .space_id = ACPI_ADDRESS_SPACE_FIXED, + .bit_width = 0, + .bit_offset = 0, + { + .resv = 0 + }, + .addrl = 0, + .addrh = 0, + }; + + nlen = acpigen_write_name("_PTC"); + len = acpigen_write_package(2); + + /* ControlRegister */ + rlen = acpigen_write_resourcetemplate_header(); + rlen += acpigen_write_register(&addr); + len += acpigen_write_resourcetemplate_footer(rlen); + len += rlen; + + /* StatusRegister */ + rlen = acpigen_write_resourcetemplate_header(); + rlen += acpigen_write_register(&addr); + len += acpigen_write_resourcetemplate_footer(rlen); + len += rlen; + + acpigen_patch_len(len - 1); + return len + nlen; +} + +/* generates a func with max supported P states */ +int acpigen_write_PPC(u8 nr) +{ +/* + Method (_PPC, 0, NotSerialized) + { + Return (nr) + } +*/ + int len; + /* method op */ + acpigen_emit_byte(0x14); + len = acpigen_write_len_f(); + len += acpigen_emit_namestring("_PPC"); + /* no fnarg */ + acpigen_emit_byte(0x00); + /* return */ + acpigen_emit_byte(0xa4); + /* arg */ + len += acpigen_write_byte(nr); + /* add all single bytes */ + len += 3; + acpigen_patch_len(len - 1); + return len; +} + +/* generates a func with max supported P states */ +int acpigen_write_PPC_NVS(void) +{ +/* + Method (_PPC, 0, NotSerialized) + { + Return (PPCM) + } +*/ + int len; + /* method op */ + acpigen_emit_byte(0x14); + len = acpigen_write_len_f(); + len += acpigen_emit_namestring("_PPC"); + /* no fnarg */ + acpigen_emit_byte(0x00); + /* return */ + acpigen_emit_byte(0xa4); + /* arg */ + len += acpigen_emit_namestring("PPCM"); + /* add all single bytes */ + len += 3; + acpigen_patch_len(len - 1); + return len; +} + +int acpigen_write_TPC(const char *gnvs_tpc_limit) +{ +/* + // Sample _TPC method + Method (_TPC, 0, NotSerialized) + { + Return (\TLVL) + } + */ + int len; + + len = acpigen_emit_byte(0x14); /* MethodOp */ + len += acpigen_write_len_f(); /* PkgLength */ + len += acpigen_emit_namestring("_TPC"); + len += acpigen_emit_byte(0x00); /* No Arguments */ + len += acpigen_emit_byte(0xa4); /* ReturnOp */ + len += acpigen_emit_namestring(gnvs_tpc_limit); + acpigen_patch_len(len - 1); + return len; +} + +int acpigen_write_PSS_package(u32 coreFreq, u32 power, u32 transLat, + u32 busmLat, u32 control, u32 status) +{ + int len; + len = acpigen_write_package(6); + len += acpigen_write_dword(coreFreq); + len += acpigen_write_dword(power); + len += acpigen_write_dword(transLat); + len += acpigen_write_dword(busmLat); + len += acpigen_write_dword(control); + len += acpigen_write_dword(status); + // pkglen without the len opcode + acpigen_patch_len(len - 1); + + printk(BIOS_DEBUG, "PSS: %uMHz power %u control 0x%x status 0x%x\n", + coreFreq, power, control, status); + + return len; +} + +int 
acpigen_write_PSD_package(u32 domain, u32 numprocs, PSD_coord coordtype) +{ + int len, lenh, lenp; + lenh = acpigen_write_name("_PSD"); + lenp = acpigen_write_package(1); + len = acpigen_write_package(5); + len += acpigen_write_byte(5); // 5 values + len += acpigen_write_byte(0); // revision 0 + len += acpigen_write_dword(domain); + len += acpigen_write_dword(coordtype); + len += acpigen_write_dword(numprocs); + acpigen_patch_len(len - 1); + len += lenp; + acpigen_patch_len(len - 1); + return len + lenh; +} + +int acpigen_write_CST_package_entry(acpi_cstate_t *cstate) +{ + int len, len0; + char *start, *end; + + len0 = acpigen_write_package(4); + len = acpigen_write_resourcetemplate_header(); + start = acpigen_get_current(); + acpigen_write_register(&cstate->resource); + end = acpigen_get_current(); + len += end - start; + len += acpigen_write_resourcetemplate_footer(len); + len += len0; + len += acpigen_write_dword(cstate->ctype); + len += acpigen_write_dword(cstate->latency); + len += acpigen_write_dword(cstate->power); + acpigen_patch_len(len - 1); + return len; +} + +int acpigen_write_CST_package(acpi_cstate_t *cstate, int nentries) +{ + int len, lenh, lenp, i; + lenh = acpigen_write_name("_CST"); + lenp = acpigen_write_package(nentries+1); + len = acpigen_write_dword(nentries); + + for (i = 0; i < nentries; i++) + len += acpigen_write_CST_package_entry(cstate + i); + + len += lenp; + acpigen_patch_len(len - 1); + return len + lenh; +} + +int acpigen_write_TSS_package(int entries, acpi_tstate_t *tstate_list) +{ +/* + Sample _TSS package with 100% and 50% duty cycles + Name (_TSS, Package (0x02) + { + Package(){100, 1000, 0, 0x00, 0) + Package(){50, 520, 0, 0x18, 0) + }) + */ + int i, len, plen, nlen; + acpi_tstate_t *tstate = tstate_list; + + nlen = acpigen_write_name("_TSS"); + plen = acpigen_write_package(entries); + + for (i = 0; i < entries; i++) { + len = acpigen_write_package(5); + len += acpigen_write_dword(tstate->percent); + len += acpigen_write_dword(tstate->power); + len += acpigen_write_dword(tstate->latency); + len += acpigen_write_dword(tstate->control); + len += acpigen_write_dword(tstate->status); + acpigen_patch_len(len - 1); + tstate++; + plen += len; + } + + acpigen_patch_len(plen - 1); + return plen + nlen; +} + +int acpigen_write_TSD_package(u32 domain, u32 numprocs, PSD_coord coordtype) +{ + int len, lenh, lenp; + lenh = acpigen_write_name("_TSD"); + lenp = acpigen_write_package(1); + len = acpigen_write_package(5); + len += acpigen_write_byte(5); // 5 values + len += acpigen_write_byte(0); // revision 0 + len += acpigen_write_dword(domain); + len += acpigen_write_dword(coordtype); + len += acpigen_write_dword(numprocs); + acpigen_patch_len(len - 1); + len += lenp; + acpigen_patch_len(len - 1); + return len + lenh; +} + + + +int acpigen_write_mem32fixed(int readwrite, u32 base, u32 size) +{ + /* + * acpi 4.0 section 6.4.3.4: 32-Bit Fixed Memory Range Descriptor + * Byte 0: + * Bit7 : 1 => big item + * Bit6-0: 0000110 (0x6) => 32-bit fixed memory + */ + acpigen_emit_byte(0x86); + /* Byte 1+2: length (0x0009) */ + acpigen_emit_byte(0x09); + acpigen_emit_byte(0x00); + /* bit1-7 are ignored */ + acpigen_emit_byte(readwrite ? 
0x01 : 0x00); + acpigen_emit_byte(base & 0xff); + acpigen_emit_byte((base >> 8) & 0xff); + acpigen_emit_byte((base >> 16) & 0xff); + acpigen_emit_byte((base >> 24) & 0xff); + acpigen_emit_byte(size & 0xff); + acpigen_emit_byte((size >> 8) & 0xff); + acpigen_emit_byte((size >> 16) & 0xff); + acpigen_emit_byte((size >> 24) & 0xff); + return 12; +} + +int acpigen_write_register(acpi_addr_t *addr) +{ + acpigen_emit_byte(0x82); /* Register Descriptor */ + acpigen_emit_byte(0x0c); /* Register Length 7:0 */ + acpigen_emit_byte(0x00); /* Register Length 15:8 */ + acpigen_emit_byte(addr->space_id); /* Address Space ID */ + acpigen_emit_byte(addr->bit_width); /* Register Bit Width */ + acpigen_emit_byte(addr->bit_offset); /* Register Bit Offset */ + acpigen_emit_byte(addr->resv); /* Register Access Size */ + acpigen_emit_byte(addr->addrl & 0xff); /* Register Address Low */ + acpigen_emit_byte((addr->addrl >> 8) & 0xff); + acpigen_emit_byte((addr->addrl >> 16) & 0xff); + acpigen_emit_byte((addr->addrl >> 24) & 0xff); + acpigen_emit_byte(addr->addrh & 0xff); /* Register Address High */ + acpigen_emit_byte((addr->addrh >> 8) & 0xff); + acpigen_emit_byte((addr->addrh >> 16) & 0xff); + acpigen_emit_byte((addr->addrh >> 24) & 0xff); + return 15; +} + +int acpigen_write_io16(u16 min, u16 max, u8 align, u8 len, u8 decode16) +{ + /* + * acpi 4.0 section 6.4.2.6: I/O Port Descriptor + * Byte 0: + * Bit7 : 0 => small item + * Bit6-3: 1000 (0x8) => I/O port descriptor + * Bit2-0: 111 (0x7) => 7 Bytes long + */ + acpigen_emit_byte(0x47); + /* does the device decode all 16 or just 10 bits? */ + /* bit1-7 are ignored */ + acpigen_emit_byte(decode16 ? 0x01 : 0x00); + /* minimum base address the device may be configured for */ + acpigen_emit_byte(min & 0xff); + acpigen_emit_byte((min >> 8) & 0xff); + /* maximum base address the device may be configured for */ + acpigen_emit_byte(max & 0xff); + acpigen_emit_byte((max >> 8) & 0xff); + /* alignment for min base */ + acpigen_emit_byte(align & 0xff); + acpigen_emit_byte(len & 0xff); + return 8; +} + +int acpigen_write_resourcetemplate_header(void) +{ + int len; + /* + * A ResourceTemplate() is a Buffer() with a + * (Byte|Word|DWord) containing the length, followed by one or more + * resource items, terminated by the end tag + * (small item 0xf, len 1) + */ + len = acpigen_emit_byte(0x11); /* Buffer opcode */ + len += acpigen_write_len_f(); + len += acpigen_emit_byte(0x0b); /* Word opcode */ + len_stack[ltop++] = acpigen_get_current(); + len += acpigen_emit_byte(0x00); + len += acpigen_emit_byte(0x00); + return len; +} + +int acpigen_write_resourcetemplate_footer(int len) +{ + char *p = len_stack[--ltop]; + /* + * end tag (acpi 4.0 Section 6.4.2.8) + * 0x79 + * 0x00 is treated as a good checksum according to the spec + * and is what iasl generates. + */ + len += acpigen_emit_byte(0x79); + len += acpigen_emit_byte(0x00); + /* patch len word */ + p[0] = (len-6) & 0xff; + p[1] = ((len-6) >> 8) & 0xff; + /* patch len field */ + acpigen_patch_len(len-1); + return 2; +} + +static void acpigen_add_mainboard_rsvd_mem32(void *gp, struct device *dev, + struct resource *res) +{ + acpigen_write_mem32fixed(0, res->base, res->size); +} + +static void acpigen_add_mainboard_rsvd_io(void *gp, struct device *dev, + struct resource *res) +{ + resource_t base = res->base; + resource_t size = res->size; + while (size > 0) { + resource_t sz = size > 255 ? 
255 : size; + acpigen_write_io16(base, base, 0, sz, 1); + size -= sz; + base += sz; + } +} + +int acpigen_write_mainboard_resource_template(void) +{ + int len; + char *start; + char *end; + len = acpigen_write_resourcetemplate_header(); + start = acpigen_get_current(); + + /* Add reserved memory ranges */ + search_global_resources( + IORESOURCE_MEM | IORESOURCE_RESERVE, + IORESOURCE_MEM | IORESOURCE_RESERVE, + acpigen_add_mainboard_rsvd_mem32, 0); + + /* Add reserved io ranges */ + search_global_resources( + IORESOURCE_IO | IORESOURCE_RESERVE, + IORESOURCE_IO | IORESOURCE_RESERVE, + acpigen_add_mainboard_rsvd_io, 0); + + end = acpigen_get_current(); + len += end-start; + len += acpigen_write_resourcetemplate_footer(len); + return len; +} + +int acpigen_write_mainboard_resources(const char *scope, const char *name) +{ + int len; + len = acpigen_write_scope(scope); + len += acpigen_write_name(name); + len += acpigen_write_mainboard_resource_template(); + acpigen_patch_len(len - 1); + return len; +} diff --git a/src/arch/armv7/boot/boot.c b/src/arch/armv7/boot/boot.c new file mode 100644 index 0000000000..722fca540f --- /dev/null +++ b/src/arch/armv7/boot/boot.c @@ -0,0 +1,189 @@ +#include +#include +#include +#include +#include +#include + + +#ifndef CMD_LINE +#define CMD_LINE "" +#endif + + + +#define UPSZ(X) ((sizeof(X) + 3) &~3) + +static struct { + Elf_Bhdr hdr; + Elf_Nhdr ft_hdr; + unsigned char ft_desc[UPSZ(FIRMWARE_TYPE)]; + Elf_Nhdr bl_hdr; + unsigned char bl_desc[UPSZ(BOOTLOADER)]; + Elf_Nhdr blv_hdr; + unsigned char blv_desc[UPSZ(BOOTLOADER_VERSION)]; + Elf_Nhdr cmd_hdr; + unsigned char cmd_desc[UPSZ(CMD_LINE)]; +} elf_boot_notes = { + .hdr = { + .b_signature = 0x0E1FB007, + .b_size = sizeof(elf_boot_notes), + .b_checksum = 0, + .b_records = 4, + }, + .ft_hdr = { + .n_namesz = 0, + .n_descsz = sizeof(FIRMWARE_TYPE), + .n_type = EBN_FIRMWARE_TYPE, + }, + .ft_desc = FIRMWARE_TYPE, + .bl_hdr = { + .n_namesz = 0, + .n_descsz = sizeof(BOOTLOADER), + .n_type = EBN_BOOTLOADER_NAME, + }, + .bl_desc = BOOTLOADER, + .blv_hdr = { + .n_namesz = 0, + .n_descsz = sizeof(BOOTLOADER_VERSION), + .n_type = EBN_BOOTLOADER_VERSION, + }, + .blv_desc = BOOTLOADER_VERSION, + .cmd_hdr = { + .n_namesz = 0, + .n_descsz = sizeof(CMD_LINE), + .n_type = EBN_COMMAND_LINE, + }, + .cmd_desc = CMD_LINE, +}; + + +int elf_check_arch(Elf_ehdr *ehdr) +{ + return ( + ((ehdr->e_machine == EM_386) || (ehdr->e_machine == EM_486)) && + (ehdr->e_ident[EI_CLASS] == ELFCLASS32) && + (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + ); + +} + +void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size) +{ + extern unsigned char _ram_seg, _eram_seg; + unsigned long lb_start, lb_size; + unsigned long adjust, adjusted_boot_notes; + + elf_boot_notes.hdr.b_checksum = + compute_ip_checksum(&elf_boot_notes, sizeof(elf_boot_notes)); + + lb_start = (unsigned long)&_ram_seg; + lb_size = (unsigned long)(&_eram_seg - &_ram_seg); + adjust = buffer + size - lb_start; + + adjusted_boot_notes = (unsigned long)&elf_boot_notes; + adjusted_boot_notes += adjust; + + printk(BIOS_SPEW, "entry = 0x%08lx\n", (unsigned long)entry); + printk(BIOS_SPEW, "lb_start = 0x%08lx\n", lb_start); + printk(BIOS_SPEW, "lb_size = 0x%08lx\n", lb_size); + printk(BIOS_SPEW, "adjust = 0x%08lx\n", adjust); + printk(BIOS_SPEW, "buffer = 0x%08lx\n", buffer); + printk(BIOS_SPEW, " elf_boot_notes = 0x%08lx\n", (unsigned long)&elf_boot_notes); + printk(BIOS_SPEW, "adjusted_boot_notes = 0x%08lx\n", adjusted_boot_notes); + + /* FIXME(dhendrix): port code to jump to 
kernel here... */ +#if 0 + /* Jump to kernel */ + __asm__ __volatile__( + " cld \n\t" + /* Save the callee save registers... */ + " pushl %%esi\n\t" + " pushl %%edi\n\t" + " pushl %%ebx\n\t" + /* Save the parameters I was passed */ + " pushl $0\n\t" /* 20 adjust */ + " pushl %0\n\t" /* 16 lb_start */ + " pushl %1\n\t" /* 12 buffer */ + " pushl %2\n\t" /* 8 lb_size */ + " pushl %3\n\t" /* 4 entry */ + " pushl %4\n\t" /* 0 elf_boot_notes */ + /* Compute the adjustment */ + " xorl %%eax, %%eax\n\t" + " subl 16(%%esp), %%eax\n\t" + " addl 12(%%esp), %%eax\n\t" + " addl 8(%%esp), %%eax\n\t" + " movl %%eax, 20(%%esp)\n\t" + /* Place a copy of coreboot in its new location */ + /* Move ``longs'' the coreboot size is 4 byte aligned */ + " movl 12(%%esp), %%edi\n\t" + " addl 8(%%esp), %%edi\n\t" + " movl 16(%%esp), %%esi\n\t" + " movl 8(%%esp), %%ecx\n\n" + " shrl $2, %%ecx\n\t" + " rep movsl\n\t" + + /* Adjust the stack pointer to point into the new coreboot image */ + " addl 20(%%esp), %%esp\n\t" + /* Adjust the instruction pointer to point into the new coreboot image */ + " movl $1f, %%eax\n\t" + " addl 20(%%esp), %%eax\n\t" + " jmp *%%eax\n\t" + "1: \n\t" + + /* Copy the coreboot bounce buffer over coreboot */ + /* Move ``longs'' the coreboot size is 4 byte aligned */ + " movl 16(%%esp), %%edi\n\t" + " movl 12(%%esp), %%esi\n\t" + " movl 8(%%esp), %%ecx\n\t" + " shrl $2, %%ecx\n\t" + " rep movsl\n\t" + + /* Now jump to the loaded image */ + " movl %5, %%eax\n\t" + " movl 0(%%esp), %%ebx\n\t" + " call *4(%%esp)\n\t" + + /* The loaded image returned? */ + " cli \n\t" + " cld \n\t" + + /* Copy the saved copy of coreboot where coreboot runs */ + /* Move ``longs'' the coreboot size is 4 byte aligned */ + " movl 16(%%esp), %%edi\n\t" + " movl 12(%%esp), %%esi\n\t" + " addl 8(%%esp), %%esi\n\t" + " movl 8(%%esp), %%ecx\n\t" + " shrl $2, %%ecx\n\t" + " rep movsl\n\t" + + /* Adjust the stack pointer to point into the old coreboot image */ + " subl 20(%%esp), %%esp\n\t" + + /* Adjust the instruction pointer to point into the old coreboot image */ + " movl $1f, %%eax\n\t" + " subl 20(%%esp), %%eax\n\t" + " jmp *%%eax\n\t" + "1: \n\t" + + /* Drop the parameters I was passed */ + " addl $24, %%esp\n\t" + + /* Restore the callee save registers */ + " popl %%ebx\n\t" + " popl %%edi\n\t" + " popl %%esi\n\t" + + :: + "ri" (lb_start), "ri" (buffer), "ri" (lb_size), + "ri" (entry), +#if CONFIG_MULTIBOOT + "ri"(mbi), "ri" (MB_MAGIC2) +#else + "ri"(adjusted_boot_notes), "ri" (0x0E1FB007) +#endif + ); +#endif +} + + diff --git a/src/arch/armv7/boot/coreboot_table.c b/src/arch/armv7/boot/coreboot_table.c new file mode 100644 index 0000000000..9f67da0e1b --- /dev/null +++ b/src/arch/armv7/boot/coreboot_table.c @@ -0,0 +1,711 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2003-2004 Eric Biederman + * Copyright (C) 2005-2010 coresystems GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if CONFIG_USE_OPTION_TABLE +#include +#endif +#if CONFIG_CHROMEOS +#include +#include +#endif + +static struct lb_header *lb_table_init(unsigned long addr) +{ + struct lb_header *header; + + /* 16 byte align the address */ + addr += 15; + addr &= ~15; + + header = (void *)addr; + header->signature[0] = 'L'; + header->signature[1] = 'B'; + header->signature[2] = 'I'; + header->signature[3] = 'O'; + header->header_bytes = sizeof(*header); + header->header_checksum = 0; + header->table_bytes = 0; + header->table_checksum = 0; + header->table_entries = 0; + return header; +} + +static struct lb_record *lb_first_record(struct lb_header *header) +{ + struct lb_record *rec; + rec = (void *)(((char *)header) + sizeof(*header)); + return rec; +} + +static struct lb_record *lb_last_record(struct lb_header *header) +{ + struct lb_record *rec; + rec = (void *)(((char *)header) + sizeof(*header) + header->table_bytes); + return rec; +} + +#if 0 +static struct lb_record *lb_next_record(struct lb_record *rec) +{ + rec = (void *)(((char *)rec) + rec->size); + return rec; +} +#endif + +static struct lb_record *lb_new_record(struct lb_header *header) +{ + struct lb_record *rec; + rec = lb_last_record(header); + if (header->table_entries) { + header->table_bytes += rec->size; + } + rec = lb_last_record(header); + header->table_entries++; + rec->tag = LB_TAG_UNUSED; + rec->size = sizeof(*rec); + return rec; +} + + +static struct lb_memory *lb_memory(struct lb_header *header) +{ + struct lb_record *rec; + struct lb_memory *mem; + rec = lb_new_record(header); + mem = (struct lb_memory *)rec; + mem->tag = LB_TAG_MEMORY; + mem->size = sizeof(*mem); + return mem; +} + +static struct lb_serial *lb_serial(struct lb_header *header) +{ +#if CONFIG_CONSOLE_SERIAL8250 + struct lb_record *rec; + struct lb_serial *serial; + rec = lb_new_record(header); + serial = (struct lb_serial *)rec; + serial->tag = LB_TAG_SERIAL; + serial->size = sizeof(*serial); + serial->type = LB_SERIAL_TYPE_IO_MAPPED; + serial->baseaddr = CONFIG_TTYS0_BASE; + serial->baud = CONFIG_TTYS0_BAUD; + return serial; +#elif CONFIG_CONSOLE_SERIAL8250MEM + if (uartmem_getbaseaddr()) { + struct lb_record *rec; + struct lb_serial *serial; + rec = lb_new_record(header); + serial = (struct lb_serial *)rec; + serial->tag = LB_TAG_SERIAL; + serial->size = sizeof(*serial); + serial->type = LB_SERIAL_TYPE_MEMORY_MAPPED; + serial->baseaddr = uartmem_getbaseaddr(); + serial->baud = CONFIG_TTYS0_BAUD; + return serial; + } else { + return NULL; + } +#else + return NULL; +#endif +} + +#if CONFIG_CONSOLE_SERIAL8250 || CONFIG_CONSOLE_SERIAL8250MEM || \ + CONFIG_CONSOLE_LOGBUF || CONFIG_USBDEBUG +static void add_console(struct lb_header *header, u16 consoletype) +{ + struct lb_console *console; + + console = (struct lb_console *)lb_new_record(header); + console->tag = LB_TAG_CONSOLE; + console->size = sizeof(*console); + console->type = consoletype; +} +#endif + +static void lb_console(struct lb_header *header) +{ +#if CONFIG_CONSOLE_SERIAL8250 + add_console(header, LB_TAG_CONSOLE_SERIAL8250); +#endif +#if CONFIG_CONSOLE_SERIAL8250MEM + add_console(header, LB_TAG_CONSOLE_SERIAL8250MEM); +#endif +#if CONFIG_CONSOLE_LOGBUF + 
add_console(header, LB_TAG_CONSOLE_LOGBUF); +#endif +#if CONFIG_USBDEBUG + add_console(header, LB_TAG_CONSOLE_EHCI); +#endif +} + +static void lb_framebuffer(struct lb_header *header) +{ +#if CONFIG_FRAMEBUFFER_KEEP_VESA_MODE + void fill_lb_framebuffer(struct lb_framebuffer *framebuffer); + int vbe_mode_info_valid(void); + + // If there isn't any mode info to put in the table, don't ask for it + // to be filled with junk. + if (!vbe_mode_info_valid()) + return; + struct lb_framebuffer *framebuffer; + framebuffer = (struct lb_framebuffer *)lb_new_record(header); + framebuffer->tag = LB_TAG_FRAMEBUFFER; + framebuffer->size = sizeof(*framebuffer); + fill_lb_framebuffer(framebuffer); +#endif +} + +#if CONFIG_CHROMEOS +static void lb_gpios(struct lb_header *header) +{ + struct lb_gpios *gpios; + gpios = (struct lb_gpios *)lb_new_record(header); + gpios->tag = LB_TAG_GPIO; + gpios->size = sizeof(*gpios); + gpios->count = 0; + fill_lb_gpios(gpios); +} + +static void lb_vdat(struct lb_header *header) +{ + struct lb_vdat* vdat; + + vdat = (struct lb_vdat *)lb_new_record(header); + vdat->tag = LB_TAG_VDAT; + vdat->size = sizeof(*vdat); + acpi_get_vdat_info(&vdat->vdat_addr, &vdat->vdat_size); +} + +static void lb_vbnv(struct lb_header *header) +{ + struct lb_vbnv* vbnv; + + vbnv = (struct lb_vbnv *)lb_new_record(header); + vbnv->tag = LB_TAG_VBNV; + vbnv->size = sizeof(*vbnv); + vbnv->vbnv_start = CONFIG_VBNV_OFFSET + 14; + vbnv->vbnv_size = CONFIG_VBNV_SIZE; +} +#endif + +static void add_cbmem_pointers(struct lb_header *header) +{ + /* + * These CBMEM sections' addresses are included in the coreboot table + * with the appropriate tags. + */ + const struct section_id { + int cbmem_id; + int table_tag; + } section_ids[] = { + {CBMEM_ID_TIMESTAMP, LB_TAG_TIMESTAMPS}, + {CBMEM_ID_CONSOLE, LB_TAG_CBMEM_CONSOLE} + }; + int i; + + for (i = 0; i < ARRAY_SIZE(section_ids); i++) { + const struct section_id *sid = section_ids + i; + struct lb_cbmem_ref *cbmem_ref; + void *cbmem_addr = cbmem_find(sid->cbmem_id); + + if (!cbmem_addr) + continue; /* This section is not present */ + + cbmem_ref = (struct lb_cbmem_ref *)lb_new_record(header); + if (!cbmem_ref) { + printk(BIOS_ERR, "No more room in coreboot table!\n"); + break; + } + cbmem_ref->tag = sid->table_tag; + cbmem_ref->size = sizeof(*cbmem_ref); + cbmem_ref->cbmem_addr = cbmem_addr; + } +} + +static struct lb_mainboard *lb_mainboard(struct lb_header *header) +{ + struct lb_record *rec; + struct lb_mainboard *mainboard; + rec = lb_new_record(header); + mainboard = (struct lb_mainboard *)rec; + mainboard->tag = LB_TAG_MAINBOARD; + + mainboard->size = (sizeof(*mainboard) + + strlen(mainboard_vendor) + 1 + + strlen(mainboard_part_number) + 1 + + 3) & ~3; + + mainboard->vendor_idx = 0; + mainboard->part_number_idx = strlen(mainboard_vendor) + 1; + + memcpy(mainboard->strings + mainboard->vendor_idx, + mainboard_vendor, strlen(mainboard_vendor) + 1); + memcpy(mainboard->strings + mainboard->part_number_idx, + mainboard_part_number, strlen(mainboard_part_number) + 1); + + return mainboard; +} + +#if CONFIG_USE_OPTION_TABLE +static struct cmos_checksum *lb_cmos_checksum(struct lb_header *header) +{ + struct lb_record *rec; + struct cmos_checksum *cmos_checksum; + rec = lb_new_record(header); + cmos_checksum = (struct cmos_checksum *)rec; + cmos_checksum->tag = LB_TAG_OPTION_CHECKSUM; + + cmos_checksum->size = (sizeof(*cmos_checksum)); + + cmos_checksum->range_start = LB_CKS_RANGE_START * 8; + cmos_checksum->range_end = ( LB_CKS_RANGE_END * 8 ) + 7; + 
cmos_checksum->location = LB_CKS_LOC * 8; + cmos_checksum->type = CHECKSUM_PCBIOS; + + return cmos_checksum; +} +#endif + +static void lb_strings(struct lb_header *header) +{ + static const struct { + uint32_t tag; + const char *string; + } strings[] = { + { LB_TAG_VERSION, coreboot_version, }, + { LB_TAG_EXTRA_VERSION, coreboot_extra_version, }, + { LB_TAG_BUILD, coreboot_build, }, + { LB_TAG_COMPILE_TIME, coreboot_compile_time, }, + { LB_TAG_COMPILE_BY, coreboot_compile_by, }, + { LB_TAG_COMPILE_HOST, coreboot_compile_host, }, + { LB_TAG_COMPILE_DOMAIN, coreboot_compile_domain, }, + { LB_TAG_COMPILER, coreboot_compiler, }, + { LB_TAG_LINKER, coreboot_linker, }, + { LB_TAG_ASSEMBLER, coreboot_assembler, }, + }; + unsigned int i; + for(i = 0; i < ARRAY_SIZE(strings); i++) { + struct lb_string *rec; + size_t len; + rec = (struct lb_string *)lb_new_record(header); + len = strlen(strings[i].string); + rec->tag = strings[i].tag; + rec->size = (sizeof(*rec) + len + 1 + 3) & ~3; + memcpy(rec->string, strings[i].string, len+1); + } + +} + +#if CONFIG_WRITE_HIGH_TABLES +static struct lb_forward *lb_forward(struct lb_header *header, struct lb_header *next_header) +{ + struct lb_record *rec; + struct lb_forward *forward; + rec = lb_new_record(header); + forward = (struct lb_forward *)rec; + forward->tag = LB_TAG_FORWARD; + forward->size = sizeof(*forward); + forward->forward = (uint64_t)(unsigned long)next_header; + return forward; +} +#endif + +/* FIXME(dhendrix): used to be static void lb_memory_range(), but compiler + started complaining since it shares a name with a non-static struct. ugh. */ +static void new_lb_memory_range(struct lb_memory *mem, + uint32_t type, uint64_t start, uint64_t size) +{ + int entries; + entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]); + mem->map[entries].start = pack_lb64(start); + mem->map[entries].size = pack_lb64(size); + mem->map[entries].type = type; + mem->size += sizeof(mem->map[0]); +} + +static void lb_reserve_table_memory(struct lb_header *head) +{ + struct lb_record *last_rec; + struct lb_memory *mem; + uint64_t start; + uint64_t end; + int i, entries; + + last_rec = lb_last_record(head); + mem = get_lb_mem(); + start = (unsigned long)head; + end = (unsigned long)last_rec; + entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]); + /* Resize the right two memory areas so this table is in + * a reserved area of memory. Everything has been carefully + * setup so that is all we need to do. + */ + for(i = 0; i < entries; i++ ) { + uint64_t map_start = unpack_lb64(mem->map[i].start); + uint64_t map_end = map_start + unpack_lb64(mem->map[i].size); + /* Does this area need to be expanded? */ + if (map_end == start) { + mem->map[i].size = pack_lb64(end - map_start); + } + /* Does this area need to be contracted? 
*/ + else if (map_start == start) { + mem->map[i].start = pack_lb64(end); + mem->map[i].size = pack_lb64(map_end - end); + } + } +} + +static unsigned long lb_table_fini(struct lb_header *head, int fixup) +{ + struct lb_record *rec, *first_rec; + rec = lb_last_record(head); + if (head->table_entries) { + head->table_bytes += rec->size; + } + + if (fixup) + lb_reserve_table_memory(head); + + first_rec = lb_first_record(head); + head->table_checksum = compute_ip_checksum(first_rec, head->table_bytes); + head->header_checksum = 0; + head->header_checksum = compute_ip_checksum(head, sizeof(*head)); + printk(BIOS_DEBUG, + "Wrote coreboot table at: %p, 0x%x bytes, checksum %x\n", + head, head->table_bytes, head->table_checksum); + return (unsigned long)rec + rec->size; +} + +static void lb_cleanup_memory_ranges(struct lb_memory *mem) +{ + int entries; + int i, j; + entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]); + + /* Sort the lb memory ranges */ + for(i = 0; i < entries; i++) { + uint64_t entry_start = unpack_lb64(mem->map[i].start); + for(j = i; j < entries; j++) { + uint64_t temp_start = unpack_lb64(mem->map[j].start); + if (temp_start < entry_start) { + struct lb_memory_range tmp; + tmp = mem->map[i]; + mem->map[i] = mem->map[j]; + mem->map[j] = tmp; + } + } + } + + /* Merge adjacent entries */ + for(i = 0; (i + 1) < entries; i++) { + uint64_t start, end, nstart, nend; + if (mem->map[i].type != mem->map[i + 1].type) { + continue; + } + start = unpack_lb64(mem->map[i].start); + end = start + unpack_lb64(mem->map[i].size); + nstart = unpack_lb64(mem->map[i + 1].start); + nend = nstart + unpack_lb64(mem->map[i + 1].size); + if ((start <= nstart) && (end > nstart)) { + if (start > nstart) { + start = nstart; + } + if (end < nend) { + end = nend; + } + /* Record the new region size */ + mem->map[i].start = pack_lb64(start); + mem->map[i].size = pack_lb64(end - start); + + /* Delete the entry I have merged with */ + memmove(&mem->map[i + 1], &mem->map[i + 2], + ((entries - i - 2) * sizeof(mem->map[0]))); + mem->size -= sizeof(mem->map[0]); + entries -= 1; + /* See if I can merge with the next entry as well */ + i -= 1; + } + } +} + +static void lb_remove_memory_range(struct lb_memory *mem, + uint64_t start, uint64_t size) +{ + uint64_t end; + int entries; + int i; + + end = start + size; + entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]); + + /* Remove a reserved area from the memory map */ + for(i = 0; i < entries; i++) { + uint64_t map_start = unpack_lb64(mem->map[i].start); + uint64_t map_end = map_start + unpack_lb64(mem->map[i].size); + if ((start <= map_start) && (end >= map_end)) { + /* Remove the completely covered range */ + memmove(&mem->map[i], &mem->map[i + 1], + ((entries - i - 1) * sizeof(mem->map[0]))); + mem->size -= sizeof(mem->map[0]); + entries -= 1; + /* Since the index will disappear revisit what will appear here */ + i -= 1; + } + else if ((start > map_start) && (end < map_end)) { + /* Split the memory range */ + memmove(&mem->map[i + 1], &mem->map[i], + ((entries - i) * sizeof(mem->map[0]))); + mem->size += sizeof(mem->map[0]); + entries += 1; + /* Update the first map entry */ + mem->map[i].size = pack_lb64(start - map_start); + /* Update the second map entry */ + mem->map[i + 1].start = pack_lb64(end); + mem->map[i + 1].size = pack_lb64(map_end - end); + /* Don't bother with this map entry again */ + i += 1; + } + else if ((start <= map_start) && (end > map_start)) { + /* Shrink the start of the memory range */ + mem->map[i].start = pack_lb64(end); + 
mem->map[i].size = pack_lb64(map_end - end); + } + else if ((start < map_end) && (start > map_start)) { + /* Shrink the end of the memory range */ + mem->map[i].size = pack_lb64(start - map_start); + } + } +} + +static void lb_add_memory_range(struct lb_memory *mem, + uint32_t type, uint64_t start, uint64_t size) +{ + lb_remove_memory_range(mem, start, size); + new_lb_memory_range(mem, type, start, size); + lb_cleanup_memory_ranges(mem); +} + +static void lb_dump_memory_ranges(struct lb_memory *mem) +{ + int entries; + int i; + entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]); + + printk(BIOS_DEBUG, "coreboot memory table:\n"); + for(i = 0; i < entries; i++) { + uint64_t entry_start = unpack_lb64(mem->map[i].start); + uint64_t entry_size = unpack_lb64(mem->map[i].size); + const char *entry_type; + + switch (mem->map[i].type) { + case LB_MEM_RAM: entry_type="RAM"; break; + case LB_MEM_RESERVED: entry_type="RESERVED"; break; + case LB_MEM_ACPI: entry_type="ACPI"; break; + case LB_MEM_NVS: entry_type="NVS"; break; + case LB_MEM_UNUSABLE: entry_type="UNUSABLE"; break; + case LB_MEM_VENDOR_RSVD: entry_type="VENDOR RESERVED"; break; + case LB_MEM_TABLE: entry_type="CONFIGURATION TABLES"; break; + default: entry_type="UNKNOWN!"; break; + } + + printk(BIOS_DEBUG, "%2d. %016llx-%016llx: %s\n", + i, entry_start, entry_start+entry_size-1, entry_type); + + } +} + + +/* Routines to extract part so the coreboot table or + * information from the coreboot table after we have written it. + * Currently get_lb_mem relies on a global we can change the + * implementaiton. + */ +static struct lb_memory *mem_ranges = 0; +struct lb_memory *get_lb_mem(void) +{ + return mem_ranges; +} + +static void build_lb_mem_range(void *gp, struct device *dev, struct resource *res) +{ + struct lb_memory *mem = gp; + new_lb_memory_range(mem, LB_MEM_RAM, res->base, res->size); +} + +static struct lb_memory *build_lb_mem(struct lb_header *head) +{ + struct lb_memory *mem; + + /* Record where the lb memory ranges will live */ + mem = lb_memory(head); + mem_ranges = mem; + + /* Build the raw table of memory */ + search_global_resources( + IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE, + build_lb_mem_range, mem); + lb_cleanup_memory_ranges(mem); + return mem; +} + +static void lb_add_rsvd_range(void *gp, struct device *dev, struct resource *res) +{ + struct lb_memory *mem = gp; + lb_add_memory_range(mem, LB_MEM_RESERVED, res->base, res->size); +} + +static void add_lb_reserved(struct lb_memory *mem) +{ + /* Add reserved ranges */ + search_global_resources( + IORESOURCE_MEM | IORESOURCE_RESERVE, IORESOURCE_MEM | IORESOURCE_RESERVE, + lb_add_rsvd_range, mem); +} + +unsigned long write_coreboot_table( + unsigned long low_table_start, unsigned long low_table_end, + unsigned long rom_table_start, unsigned long rom_table_end) +{ + struct lb_header *head; + struct lb_memory *mem; + +#if CONFIG_WRITE_HIGH_TABLES + printk(BIOS_DEBUG, "Writing high table forward entry at 0x%08lx\n", + low_table_end); + head = lb_table_init(low_table_end); + lb_forward(head, (struct lb_header*)rom_table_end); + + low_table_end = (unsigned long) lb_table_fini(head, 0); + printk(BIOS_DEBUG, "New low_table_end: 0x%08lx\n", low_table_end); + printk(BIOS_DEBUG, "Now going to write high coreboot table at 0x%08lx\n", + rom_table_end); + + head = lb_table_init(rom_table_end); + rom_table_end = (unsigned long)head; + printk(BIOS_DEBUG, "rom_table_end = 0x%08lx\n", rom_table_end); +#else + if(low_table_end > (0x1000 - sizeof(struct 
lb_header))) { /* after 4K */ + /* We need to put lbtable on to [0xf0000,0x100000) */ + head = lb_table_init(rom_table_end); + rom_table_end = (unsigned long)head; + } else { + head = lb_table_init(low_table_end); + low_table_end = (unsigned long)head; + } +#endif + + printk(BIOS_DEBUG, "Adjust low_table_end from 0x%08lx to ", low_table_end); + low_table_end += 0xfff; // 4K aligned + low_table_end &= ~0xfff; + printk(BIOS_DEBUG, "0x%08lx \n", low_table_end); + + /* The Linux kernel assumes this region is reserved */ + printk(BIOS_DEBUG, "Adjust rom_table_end from 0x%08lx to ", rom_table_end); + rom_table_end += 0xffff; // 64K align + rom_table_end &= ~0xffff; + printk(BIOS_DEBUG, "0x%08lx \n", rom_table_end); + +#if CONFIG_USE_OPTION_TABLE + { + struct cmos_option_table *option_table = cbfs_find_file("cmos_layout.bin", 0x1aa); + if (option_table) { + struct lb_record *rec_dest = lb_new_record(head); + /* Copy the option config table, it's already a lb_record... */ + memcpy(rec_dest, option_table, option_table->size); + /* Create cmos checksum entry in coreboot table */ + lb_cmos_checksum(head); + } else { + printk(BIOS_ERR, "cmos_layout.bin could not be found!\n"); + } + } +#endif + /* Record where RAM is located */ + mem = build_lb_mem(head); + + /* Record the mptable and the the lb_table (This will be adjusted later) */ + lb_add_memory_range(mem, LB_MEM_TABLE, + low_table_start, low_table_end - low_table_start); + + /* Record the pirq table, acpi tables, and maybe the mptable */ + lb_add_memory_range(mem, LB_MEM_TABLE, + rom_table_start, rom_table_end-rom_table_start); + +#if CONFIG_WRITE_HIGH_TABLES + printk(BIOS_DEBUG, "Adding high table area\n"); + // should this be LB_MEM_ACPI? + lb_add_memory_range(mem, LB_MEM_TABLE, + high_tables_base, high_tables_size); +#endif + + /* Add reserved regions */ + add_lb_reserved(mem); + + lb_dump_memory_ranges(mem); + + /* Note: + * I assume that there is always memory at immediately after + * the low_table_end. This means that after I setup the coreboot table. + * I can trivially fixup the reserved memory ranges to hold the correct + * size of the coreboot table. + */ + + /* Record our motherboard */ + lb_mainboard(head); + /* Record the serial port, if present */ + lb_serial(head); + /* Record our console setup */ + lb_console(head); + /* Record our various random string information */ + lb_strings(head); + /* Record our framebuffer */ + lb_framebuffer(head); + +#if CONFIG_CHROMEOS + /* Record our GPIO settings (ChromeOS specific) */ + lb_gpios(head); + + /* pass along the VDAT buffer adress */ + lb_vdat(head); + + /* pass along VBNV offsets in CMOS */ + lb_vbnv(head); +#endif + add_cbmem_pointers(head); + + /* Remember where my valid memory ranges are */ + return lb_table_fini(head, 1); + +} diff --git a/src/arch/armv7/boot/multiboot.c b/src/arch/armv7/boot/multiboot.c new file mode 100644 index 0000000000..4059f2736b --- /dev/null +++ b/src/arch/armv7/boot/multiboot.c @@ -0,0 +1,77 @@ +/* + * support for Multiboot payloads + * + * Copyright (C) 2008 Robert Millan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
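write_coreboot_table() above builds an "LBIO" header followed by variable-length records, and (when CONFIG_WRITE_HIGH_TABLES is set) leaves a forwarding record in low memory that points at the real table in CBMEM. The following is only a rough consumer-side sketch of that layout; the struct definitions, the scan window and the LBTABLE_TAG_FORWARD value are assumptions for illustration, not part of this patch.

#include <stdint.h>
#include <string.h>

struct lbtable_header { uint8_t signature[4]; uint32_t header_bytes; uint32_t header_checksum;
			uint32_t table_bytes; uint32_t table_checksum; uint32_t table_entries; };
struct lbtable_record { uint32_t tag; uint32_t size; };
struct lbtable_forward { uint32_t tag; uint32_t size; uint64_t forward; };

#define LBTABLE_TAG_FORWARD 0x0011		/* assumed tag value */

/* Scan a window for the "LBIO" signature; lb_table_init() 16-byte aligns the header. */
static struct lbtable_header *find_lb_table(uint8_t *base, size_t len)
{
	size_t off;
	for (off = 0; off + sizeof(struct lbtable_header) <= len; off += 16) {
		struct lbtable_header *h = (struct lbtable_header *)(base + off);
		struct lbtable_record *r;
		if (memcmp(h->signature, "LBIO", 4) != 0)
			continue;
		/* Records start immediately after the header (see lb_first_record()). */
		r = (struct lbtable_record *)((uint8_t *)h + h->header_bytes);
		if (r->tag == LBTABLE_TAG_FORWARD)	/* follow the forwarder once */
			return (struct lbtable_header *)(uintptr_t)
				((struct lbtable_forward *)r)->forward;
		return h;
	}
	return NULL;
}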
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + */ + +#include +#include +#include +#include +#include +#include + +struct multiboot_info *mbi = NULL; + +unsigned long write_multiboot_info(unsigned long rom_table_end) +{ + static struct multiboot_mmap_entry *mb_mem; + struct lb_memory* coreboot_table; + int entries; + int i; + + mbi = (struct multiboot_info *)rom_table_end; + + memset(mbi, 0, sizeof(*mbi)); + rom_table_end += sizeof(*mbi); + + mbi->mmap_addr = (u32) rom_table_end; + mb_mem = (struct multiboot_mmap_entry *)rom_table_end; + + /* copy regions from coreboot tables */ + coreboot_table = get_lb_mem(); + entries = (coreboot_table->size - sizeof(*coreboot_table))/sizeof(coreboot_table->map[0]); + + if (coreboot_table == NULL || entries < 1) { + printk(BIOS_INFO, "%s: Cannot find coreboot table.\n", __func__); + return (unsigned long) mb_mem; + } + + for (i = 0; i < entries; i++) { + uint64_t entry_start = unpack_lb64(coreboot_table->map[i].start); + uint64_t entry_size = unpack_lb64(coreboot_table->map[i].size); + mb_mem->addr = entry_start; + mb_mem->len = entry_size; + switch (coreboot_table->map[i].type) { + case LB_MEM_RAM: + mb_mem->type = MULTIBOOT_MEMORY_AVAILABLE; + break; + default: // anything other than usable RAM + mb_mem->type = MULTIBOOT_MEMORY_RESERVED; + break; + } + mb_mem->size = sizeof(*mb_mem) - sizeof(mb_mem->size); + mb_mem++; + } + + mbi->mmap_length = ((u32) mb_mem) - mbi->mmap_addr; + mbi->flags |= MB_INFO_MEM_MAP; + + printk(BIOS_INFO, "Multiboot Information structure has been written.\n"); + + return (unsigned long)mb_mem; +} diff --git a/src/arch/armv7/boot/tables.c b/src/arch/armv7/boot/tables.c new file mode 100644 index 0000000000..07c56354c8 --- /dev/null +++ b/src/arch/armv7/boot/tables.c @@ -0,0 +1,100 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2003 Eric Biederman + * Copyright (C) 2005 Steve Magnani + * Copyright (C) 2008-2009 coresystems GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +uint64_t high_tables_base = 0; +uint64_t high_tables_size; + +void cbmem_arch_init(void) +{ +} + +struct lb_memory *write_tables(void) +{ + unsigned long low_table_start, low_table_end; + unsigned long rom_table_start, rom_table_end; + + /* Even if high tables are configured, some tables are copied both to + * the low and the high area, so payloads and OSes don't need to know + * about the high tables. + */ + unsigned long high_table_pointer; + + if (!high_tables_base) { + printk(BIOS_ERR, "ERROR: High Tables Base is not set.\n"); + // Are there any boards without? + // Stepan thinks we should die() here! 
+ } + + printk(BIOS_DEBUG, "High Tables Base is %llx.\n", high_tables_base); + + rom_table_start = 0xf0000; + rom_table_end = 0xf0000; + + /* Start low addr at 0x500, so we don't run into conflicts with the BDA + * in case our data structures grow beyound 0x400. Only multiboot, GDT + * and the coreboot table use low_tables. + */ + low_table_start = 0; + low_table_end = 0x500; + +#define MAX_COREBOOT_TABLE_SIZE (8 * 1024) + post_code(0x9d); + + high_table_pointer = (unsigned long)cbmem_add(CBMEM_ID_CBTABLE, MAX_COREBOOT_TABLE_SIZE); + + if (high_table_pointer) { + unsigned long new_high_table_pointer; + + /* Also put a forwarder entry into 0-4K */ + new_high_table_pointer = write_coreboot_table(low_table_start, low_table_end, + high_tables_base, high_table_pointer); + + if (new_high_table_pointer > (high_table_pointer + + MAX_COREBOOT_TABLE_SIZE)) + printk(BIOS_ERR, "%s: coreboot table didn't fit (%lx)\n", + __func__, new_high_table_pointer - + high_table_pointer); + + printk(BIOS_DEBUG, "coreboot table: %ld bytes.\n", + new_high_table_pointer - high_table_pointer); + } else { + /* The coreboot table must be in 0-4K or 960K-1M */ + rom_table_end = write_coreboot_table( + low_table_start, low_table_end, + rom_table_start, rom_table_end); + } + + post_code(0x9e); + + // Remove before sending upstream + cbmem_list(); + + return get_lb_mem(); +} diff --git a/src/arch/armv7/boot/wakeup.S b/src/arch/armv7/boot/wakeup.S new file mode 100644 index 0000000000..8ae337ca5d --- /dev/null +++ b/src/arch/armv7/boot/wakeup.S @@ -0,0 +1,94 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2009 Rudolf Marek + * Copyright (C) 2009 coresystems GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define WAKEUP_BASE 0x600 +#define RELOCATED(x) (x - __wakeup + WAKEUP_BASE) + +/* CR0 bits */ +#define PE (1 << 0) + + .code32 + .globl __wakeup +__wakeup: + /* First prepare the jmp to the resume vector */ + mov 0x4(%esp), %eax /* vector */ + /* last 4 bits of linear addr are taken as offset */ + andw $0x0f, %ax + movw %ax, (__wakeup_offset) + mov 0x4(%esp), %eax + /* the rest is taken as segment */ + shr $4, %eax + movw %ax, (__wakeup_segment) + + /* Then overwrite coreboot with our backed up memory */ + cld + movl 8(%esp), %esi + movl 12(%esp), %edi + movl 16(%esp), %ecx + shrl $2, %ecx + rep movsl + + /* Activate the right segment descriptor real mode. */ + ljmp $0x28, $RELOCATED(1f) +1: +.code16 + /* 16 bit code from here on... */ + + /* Load the segment registers w/ properly configured + * segment descriptors. They will retain these + * configurations (limits, writability, etc.) once + * protected mode is turned off. 
+ */ + mov $0x30, %ax + mov %ax, %ds + mov %ax, %es + mov %ax, %fs + mov %ax, %gs + mov %ax, %ss + + /* Turn off protection */ + movl %cr0, %eax + andl $~PE, %eax + movl %eax, %cr0 + + /* Now really going into real mode */ + ljmp $0, $RELOCATED(1f) +1: + movw $0x0, %ax + movw %ax, %ds + movw %ax, %es + movw %ax, %ss + movw %ax, %fs + movw %ax, %gs + + /* This is a FAR JMP to the OS waking vector. The C code changed + * the address to be correct. + */ + .byte 0xea + +__wakeup_offset = RELOCATED(.) + .word 0x0000 + +__wakeup_segment = RELOCATED(.) + .word 0x0000 + + .globl __wakeup_size +__wakeup_size = ( . - __wakeup) + diff --git a/src/arch/armv7/bootblock_simple.c b/src/arch/armv7/bootblock_simple.c new file mode 100644 index 0000000000..f447a29cf1 --- /dev/null +++ b/src/arch/armv7/bootblock_simple.c @@ -0,0 +1,51 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2010 Google Inc + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + */ + + + +#include + + +#include "../../lib/uart8250.c" +#include "lib/div.c" + +struct uart8250 uart = { + 115200 +}; + +void main(unsigned long bist) +{ + init_uart8250(CONFIG_TTYS0_BASE, &uart); + uart8250_tx_byte(CONFIG_TTYS0_BASE, '@'); + + if (boot_cpu()) { + bootblock_cpu_init(); + bootblock_northbridge_init(); + bootblock_southbridge_init(); + } + const char* target1 = "fallback/romstage"; + unsigned long entry; + entry = findstage(target1); + if (entry) call(entry, bist); + + hlt(); +} + diff --git a/src/arch/armv7/coreboot_ram.ld b/src/arch/armv7/coreboot_ram.ld new file mode 100644 index 0000000000..57ddd03c0d --- /dev/null +++ b/src/arch/armv7/coreboot_ram.ld @@ -0,0 +1,128 @@ +/* + * Memory map: + * + * CONFIG_RAMBASE : text segment + * : rodata segment + * : data segment + * : bss segment + * : stack + * : heap + */ +/* + * Bootstrap code for the STPC Consumer + * Copyright (c) 1999 by Net Insight AB. All Rights Reserved. + */ + +/* + * Written by Johan Rydberg, based on work by Daniel Kahlin. + * Rewritten by Eric Biederman + * 2005.12 yhlu add coreboot_ram cross the vga font buffer handling + */ + +/* We use ELF as output format. So that we can debug the code in some form. */ +INCLUDE ldoptions + +ENTRY(_start) + +SECTIONS +{ + . = CONFIG_RAMBASE; + /* First we place the code and read only data (typically const declared). + * This could theoretically be placed in rom. + */ + .text : { + _text = .; + *(.text); + *(.text.*); + . = ALIGN(16); + _etext = .; + } + + .rodata : { + _rodata = .; + . = ALIGN(4); + console_drivers = .; + *(.rodata.console_drivers) + econsole_drivers = . ; + . = ALIGN(4); + pci_drivers = . ; + *(.rodata.pci_driver) + epci_drivers = . ; + cpu_drivers = . ; + *(.rodata.cpu_driver) + ecpu_drivers = . ; + *(.rodata) + *(.rodata.*) + /* kevinh/Ispiri - Added an align, because the objcopy tool + * incorrectly converts sections that are not long word aligned. + */ + . 
= ALIGN(4); + + _erodata = .; + } + /* After the code we place initialized data (typically initialized + * global variables). This gets copied into ram by startup code. + * __data_start and __data_end shows where in ram this should be placed, + * whereas __data_loadstart and __data_loadend shows where in rom to + * copy from. + */ + .data : { + _data = .; + *(.data) + _edata = .; + } + + /* bss does not contain data, it is just a space that should be zero + * initialized on startup. (typically uninitialized global variables) + * crt0.S fills between _bss and _ebss with zeroes. + */ + _bss = .; + .bss . : { + *(.bss) + *(.sbss) + *(COMMON) + } + _ebss = .; + _end = .; + + /* coreboot really "ends" here. Only heap and stack are placed after + * this line. + */ + + . = ALIGN(CONFIG_STACK_SIZE); + + _stack = .; + .stack . : { + /* Reserve a stack for each possible cpu */ + . += CONFIG_MAX_CPUS*CONFIG_STACK_SIZE; + } + _estack = .; + + _heap = .; + .heap . : { + /* Reserve CONFIG_HEAP_SIZE bytes for the heap */ + . = CONFIG_HEAP_SIZE ; + . = ALIGN(4); + } + _eheap = .; + + /* The ram segment. This includes all memory used by the memory + * resident copy of coreboot, except the tables that are produced on + * the fly, but including stack and heap. + */ + _ram_seg = _text; + _eram_seg = _eheap; + + /* CONFIG_RAMTOP is the upper address of cached memory (among other + * things). We must not exceed beyond that address, there be dragons. + */ + _bogus = ASSERT( ( _eram_seg < (CONFIG_RAMTOP)) , "Please increase CONFIG_RAMTOP"); + + /* Discard the sections we don't need/want */ + + /DISCARD/ : { + *(.comment) + *(.note) + *(.note.*) + } +} diff --git a/src/arch/armv7/cpu.c b/src/arch/armv7/cpu.c new file mode 100644 index 0000000000..622e7ed8a8 --- /dev/null +++ b/src/arch/armv7/cpu.c @@ -0,0 +1,101 @@ +/* + * (C) Copyright 2008 Texas Insturments + * + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Marius Groeger + * + * (C) Copyright 2002 + * Gary Jennejohn, DENX Software Engineering, + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* + * CPU specific code + */ + +#if 0 +#include +#include +#include +#include +#include +#ifdef CONFIG_EXYNOS_LCD +#include +#endif +#endif + +void save_boot_params_default(u32 r0, u32 r1, u32 r2, u32 r3) +{ +} + +void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3) + __attribute__((weak, alias("save_boot_params_default"))); + +#if 0 +int cleanup_before_linux(void) +{ +#ifdef CONFIG_BOOTSTAGE_REPORT + bootstage_report(); +#endif +#ifdef CONFIG_BOOTSTAGE_STASH + bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH, + CONFIG_BOOTSTAGE_STASH_SIZE); +#endif + /* + * this function is called just before we call linux + * it prepares the processor for linux + * + * we turn off caches etc ... 
+ */ + disable_interrupts(); + +#ifdef CONFIG_EXYNOS_LCD + exynos_fimd_disable(); +#endif + + /* + * Turn off I-cache and invalidate it + */ + icache_disable(); + invalidate_icache_all(); + + /* + * turn off D-cache + * dcache_disable() in turn flushes the d-cache and disables MMU + */ + dcache_disable(); + v7_outer_cache_disable(); + + /* + * After D-cache is flushed and before it is disabled there may + * be some new valid entries brought into the cache. We are sure + * that these lines are not dirty and will not affect our execution. + * (because unwinding the call-stack and setting a bit in CP15 SCTRL + * is all we did during this. We have not pushed anything on to the + * stack. Neither have we affected any static data) + * So just invalidate the entire d-cache again to avoid coherency + * problems for kernel + */ + invalidate_dcache_all(); + + return 0; +} +#endif diff --git a/src/arch/armv7/include/arch/atomic.h b/src/arch/armv7/include/arch/atomic.h new file mode 100644 index 0000000000..5a8954fcd4 --- /dev/null +++ b/src/arch/armv7/include/arch/atomic.h @@ -0,0 +1,111 @@ +/* + * linux/include/asm-arm/atomic.h + * + * Copyright (c) 1996 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Changelog: + * 27-06-1996 RMK Created + * 13-04-1997 RMK Made functions atomic! + * 07-12-1997 RMK Upgraded for v2.1. + * 26-08-1998 PJB Added #ifdef __KERNEL__ + */ +#ifndef __ASM_ARM_ATOMIC_H +#define __ASM_ARM_ATOMIC_H + +#include + +#ifdef CONFIG_SMP +#error SMP not supported +#endif + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) { (i) } + +#include + +#define atomic_read(v) ((v)->counter) +#define atomic_set(v,i) (((v)->counter) = (i)) + +static inline void atomic_add(int i, volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter += i; + local_irq_restore(flags); +} + +static inline void atomic_sub(int i, volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter -= i; + local_irq_restore(flags); +} + +static inline void atomic_inc(volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter += 1; + local_irq_restore(flags); +} + +static inline void atomic_dec(volatile atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + v->counter -= 1; + local_irq_restore(flags); +} + +static inline int atomic_dec_and_test(volatile atomic_t *v) +{ + unsigned long flags; + int val; + + local_irq_save(flags); + val = v->counter; + v->counter = val -= 1; + local_irq_restore(flags); + + return val == 0; +} + +static inline int atomic_add_negative(int i, volatile atomic_t *v) +{ + unsigned long flags; + int val; + + local_irq_save(flags); + val = v->counter; + v->counter = val += i; + local_irq_restore(flags); + + return val < 0; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long flags; + + local_irq_save(flags); + *addr &= ~mask; + local_irq_restore(flags); +} + +/* Atomic operations are already serializing on ARM */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#endif diff --git a/src/arch/armv7/include/arch/boot/boot.h b/src/arch/armv7/include/arch/boot/boot.h new file mode 100644 index 0000000000..08651cd1a8 --- /dev/null 
+++ b/src/arch/armv7/include/arch/boot/boot.h @@ -0,0 +1,8 @@ +#ifndef ASM_ARM_BOOT_H +#define ASM_ARM_BOOT_H + +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_ARM + +#endif /* ASM_ARM_BOOT_H */ diff --git a/src/arch/armv7/include/arch/byteorder.h b/src/arch/armv7/include/arch/byteorder.h new file mode 100644 index 0000000000..8dc069f486 --- /dev/null +++ b/src/arch/armv7/include/arch/byteorder.h @@ -0,0 +1,27 @@ +#ifndef _BYTEORDER_H +#define _BYTEORDER_H + +#define __LITTLE_ENDIAN 1234 + +#include +#include + +#define cpu_to_le64(x) ((uint64_t)(x)) +#define le64_to_cpu(x) ((uint64_t)(x)) +#define cpu_to_le32(x) ((uint32_t)(x)) +#define le32_to_cpu(x) ((uint32_t)(x)) +#define cpu_to_le16(x) ((uint16_t)(x)) +#define le16_to_cpu(x) ((uint16_t)(x)) +#define cpu_to_be64(x) swab64(x) +#define be64_to_cpu(x) swab64(x) +#define cpu_to_be32(x) swab32((x)) +#define be32_to_cpu(x) swab32((x)) +#define cpu_to_be16(x) swab16((x)) +#define be16_to_cpu(x) swab16((x)) + +#define ntohll(x) be64_to_cpu(x) +#define htonll(x) cpu_to_be64(x) +#define ntohl(x) be32_to_cpu(x) +#define htonl(x) cpu_to_be32(x) + +#endif /* _BYTEORDER_H */ diff --git a/src/arch/armv7/include/arch/coreboot_tables.h b/src/arch/armv7/include/arch/coreboot_tables.h new file mode 100644 index 0000000000..3c9bf98f22 --- /dev/null +++ b/src/arch/armv7/include/arch/coreboot_tables.h @@ -0,0 +1,25 @@ +#ifndef COREBOOT_TABLE_H +#define COREBOOT_TABLE_H + +#include + +/* This file holds function prototypes for building the coreboot table. */ +unsigned long write_coreboot_table( + unsigned long low_table_start, unsigned long low_table_end, + unsigned long rom_table_start, unsigned long rom_table_end); + +void lb_memory_range(struct lb_memory *mem, + uint32_t type, uint64_t start, uint64_t size); + +/* Routines to extract part so the coreboot table or information + * from the coreboot table. + */ +struct lb_memory *get_lb_mem(void); + +extern struct cmos_option_table option_table; + +/* defined by mainboard.c if the mainboard requires extra resources */ +int add_mainboard_resources(struct lb_memory *mem); +int add_northbridge_resources(struct lb_memory *mem); + +#endif /* COREBOOT_TABLE_H */ diff --git a/src/arch/armv7/include/arch/cpu.h b/src/arch/armv7/include/arch/cpu.h index b68004a600..20a12c929e 100644 --- a/src/arch/armv7/include/arch/cpu.h +++ b/src/arch/armv7/include/arch/cpu.h @@ -22,4 +22,25 @@ #define asmlinkage +#if !defined(__PRE_RAM__) +#include + +struct cpu_driver { + struct device_operations *ops; + struct cpu_device_id *id_table; +}; + +struct cpu_info { + device_t cpu; + unsigned long index; +}; + +struct cpuinfo_arm { + uint8_t arm; /* CPU family */ + uint8_t arm_vendor; /* CPU vendor */ + uint8_t arm_model; +}; + #endif + +#endif /* __ARCH_CPU_H__ */ diff --git a/src/arch/armv7/include/arch/gpio.h b/src/arch/armv7/include/arch/gpio.h new file mode 100644 index 0000000000..b5dd9b7345 --- /dev/null +++ b/src/arch/armv7/include/arch/gpio.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2011 The Chromium OS Authors. + * Copyright (c) 2011, NVIDIA Corp. All rights reserved. + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef _ASM_GENERIC_GPIO_H_ +#define _ASM_GENERIC_GPIO_H_ + +/* + * Generic GPIO API for U-Boot + * + * GPIOs are numbered from 0 to GPIO_COUNT-1 which value is defined + * by the SOC/architecture. + * + * Each GPIO can be an input or output. If an input then its value can + * be read as 0 or 1. If an output then its value can be set to 0 or 1. + * If you try to write an input then the value is undefined. If you try + * to read an output, barring something very unusual, you will get + * back the value of the output that you previously set. + * + * In some cases the operation may fail, for example if the GPIO number + * is out of range, or the GPIO is not available because its pin is + * being used by another function. In that case, functions may return + * an error value of -1. + */ + +/** + * Stop using the GPIO. This function should not alter pin configuration. + * + * @param gpio GPIO number + * @return 0 if ok, -1 on error + */ +int gpio_free(unsigned gpio); + +/** + * Make a GPIO an input. + * + * @param gpio GPIO number + * @return 0 if ok, -1 on error + */ +int gpio_direction_input(unsigned gpio); + +/** + * Make a GPIO an output, and set its value. + * + * @param gpio GPIO number + * @param value GPIO value (0 for low or 1 for high) + * @return 0 if ok, -1 on error + */ +int gpio_direction_output(unsigned gpio, int value); + +/** + * Get a GPIO's value. This will work whether the GPIO is an input + * or an output. + * + * @param gpio GPIO number + * @return 0 if low, 1 if high, -1 on error + */ +int gpio_get_value(unsigned gpio); + +/** + * Set an output GPIO's value. The GPIO must already be an output or + * this function may have no effect. + * + * @param gpio GPIO number + * @param value GPIO value (0 for low or 1 for high) + * @return 0 if ok, -1 on error + */ +int gpio_set_value(unsigned gpio, int value); + +/** + * Request ownership of a gpio. This should be called before any of the other + * functions are used on this gpio. + * + * @param gp GPIO number + * @param label User label for this GPIO + * @return 0 if ok, -1 on error + */ +int gpio_request(unsigned gpio, const char *label); + +#endif /* _ASM_GENERIC_GPIO_H_ */ diff --git a/src/arch/armv7/include/arch/hlt.h b/src/arch/armv7/include/arch/hlt.h new file mode 100644 index 0000000000..535508a714 --- /dev/null +++ b/src/arch/armv7/include/arch/hlt.h @@ -0,0 +1,10 @@ +#ifndef ARCH_HLT_H +#define ARCH_HLT_H + +static inline __attribute__((always_inline)) void hlt(void) +{ + for (;;) ; + //asm("hlt"); +} + +#endif /* ARCH_HLT_H */ diff --git a/src/arch/armv7/include/arch/io.h b/src/arch/armv7/include/arch/io.h new file mode 100644 index 0000000000..7675a88d31 --- /dev/null +++ b/src/arch/armv7/include/arch/io.h @@ -0,0 +1,427 @@ +/* + * linux/include/asm-arm/io.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
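The block comments above pin down the generic GPIO contract: GPIOs are plain numbers, gpio_request() must come first, and each call returns 0 on success or -1 on failure. A minimal usage sketch, assuming a board implements these functions and using a made-up pin number:

#define LED_GPIO 42				/* hypothetical board-specific GPIO number */

static int blink_status_led(void)
{
	if (gpio_request(LED_GPIO, "status-led"))
		return -1;			/* pin out of range or claimed by another function */
	if (gpio_direction_output(LED_GPIO, 1))	/* make it an output, driven high */
		return -1;
	gpio_set_value(LED_GPIO, 0);		/* toggle it low again */
	return gpio_free(LED_GPIO);		/* release; does not alter the pin configuration */
}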
+ * + * Modifications: + * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both + * constant addresses and variable addresses. + * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture + * specific IO header files. + * 27-Mar-1999 PJB Second parameter of memcpy_toio is const.. + * 04-Apr-1999 PJB Added check_signature. + * 12-Dec-1999 RMK More cleanups + * 18-Jun-2000 RMK Removed virt_to_* and friends definitions + */ +#ifndef __ASM_ARM_IO_H +#define __ASM_ARM_IO_H + +#ifdef __KERNEL__ + +#include +#include +#if 0 /* XXX###XXX */ +#include +#endif /* XXX###XXX */ + +static inline void sync(void) +{ +} + +/* + * Generic virtual read/write. Note that we don't support half-word + * read/writes. We define __arch_*[bl] here, and leave __arch_*w + * to the architecture specific code. + */ +#define __arch_getb(a) (*(volatile unsigned char *)(a)) +#define __arch_getw(a) (*(volatile unsigned short *)(a)) +#define __arch_getl(a) (*(volatile unsigned int *)(a)) + +#define __arch_putb(v,a) (*(volatile unsigned char *)(a) = (v)) +#define __arch_putw(v,a) (*(volatile unsigned short *)(a) = (v)) +#define __arch_putl(v,a) (*(volatile unsigned int *)(a) = (v)) + +#if 0 +extern inline void __raw_writesb(unsigned int addr, const void *data, int bytelen) +{ + uint8_t *buf = (uint8_t *)data; + while(bytelen--) + __arch_putb(*buf++, addr); +} + +extern inline void __raw_writesw(unsigned int addr, const void *data, int wordlen) +{ + uint16_t *buf = (uint16_t *)data; + while(wordlen--) + __arch_putw(*buf++, addr); +} + +extern inline void __raw_writesl(unsigned int addr, const void *data, int longlen) +{ + uint32_t *buf = (uint32_t *)data; + while(longlen--) + __arch_putl(*buf++, addr); +} + +extern inline void __raw_readsb(unsigned int addr, void *data, int bytelen) +{ + uint8_t *buf = (uint8_t *)data; + while(bytelen--) + *buf++ = __arch_getb(addr); +} + +extern inline void __raw_readsw(unsigned int addr, void *data, int wordlen) +{ + uint16_t *buf = (uint16_t *)data; + while(wordlen--) + *buf++ = __arch_getw(addr); +} + +extern inline void __raw_readsl(unsigned int addr, void *data, int longlen) +{ + uint32_t *buf = (uint32_t *)data; + while(longlen--) + *buf++ = __arch_getl(addr); +} +#endif + +#define __raw_writeb(v,a) __arch_putb(v,a) +#define __raw_writew(v,a) __arch_putw(v,a) +#define __raw_writel(v,a) __arch_putl(v,a) + +#define __raw_readb(a) __arch_getb(a) +#define __raw_readw(a) __arch_getw(a) +#define __raw_readl(a) __arch_getl(a) + +/* + * TODO: The kernel offers some more advanced versions of barriers, it might + * have some advantages to use them instead of the simple one here. + */ +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#define __iormb() dmb() +#define __iowmb() dmb() + +#define writeb(v,c) ({ u8 __v = v; __iowmb(); __arch_putb(__v,c); __v; }) +#define writew(v,c) ({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; }) +#define writel(v,c) ({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; }) + +#define readb(c) ({ u8 __v = __arch_getb(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = __arch_getw(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = __arch_getl(c); __iormb(); __v; }) + +/* + * The compiler seems to be incapable of optimising constants + * properly. Spell it out to the compiler in some cases. 
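readb()/readw()/readl() and their write counterparts defined above pair a volatile access with the __iormb()/__iowmb() compiler barriers, so driver code can poll and program device registers without adding its own volatile casts. A short sketch with a made-up register layout:

#define UART_STAT	0x12C30010	/* hypothetical MMIO register addresses */
#define UART_CTRL	0x12C30014
#define TX_EMPTY	(1 << 2)

static void uart_flush_and_enable(void)
{
	/* volatile access plus barrier: the loop really re-reads the device each pass */
	while (!(readl(UART_STAT) & TX_EMPTY))
		;
	writel(0x3, UART_CTRL);		/* compiler barrier first, then the store */
}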
+ * These are only valid for small values of "off" (< 1<<12) + */ +#define __raw_base_writeb(val,base,off) __arch_base_putb(val,base,off) +#define __raw_base_writew(val,base,off) __arch_base_putw(val,base,off) +#define __raw_base_writel(val,base,off) __arch_base_putl(val,base,off) + +#define __raw_base_readb(base,off) __arch_base_getb(base,off) +#define __raw_base_readw(base,off) __arch_base_getw(base,off) +#define __raw_base_readl(base,off) __arch_base_getl(base,off) + +/* + * Clear and set bits in one shot. These macros can be used to clear and + * set multiple bits in a register using a single call. These macros can + * also be used to set a multiple-bit bit pattern using a mask, by + * specifying the mask in the 'clear' parameter and the new bit pattern + * in the 'set' parameter. + */ + +#define out_arch(type,endian,a,v) __raw_write##type(cpu_to_##endian(v),a) +#define in_arch(type,endian,a) endian##_to_cpu(__raw_read##type(a)) + +#define out_le32(a,v) out_arch(l,le32,a,v) +#define out_le16(a,v) out_arch(w,le16,a,v) + +#define in_le32(a) in_arch(l,le32,a) +#define in_le16(a) in_arch(w,le16,a) + +#define out_be32(a,v) out_arch(l,be32,a,v) +#define out_be16(a,v) out_arch(w,be16,a,v) + +#define in_be32(a) in_arch(l,be32,a) +#define in_be16(a) in_arch(w,be16,a) + +#define out_8(a,v) __raw_writeb(v,a) +#define in_8(a) __raw_readb(a) + +#define clrbits(type, addr, clear) \ + out_##type((addr), in_##type(addr) & ~(clear)) + +#define setbits(type, addr, set) \ + out_##type((addr), in_##type(addr) | (set)) + +#define clrsetbits(type, addr, clear, set) \ + out_##type((addr), (in_##type(addr) & ~(clear)) | (set)) + +#define clrbits_be32(addr, clear) clrbits(be32, addr, clear) +#define setbits_be32(addr, set) setbits(be32, addr, set) +#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set) + +#define clrbits_le32(addr, clear) clrbits(le32, addr, clear) +#define setbits_le32(addr, set) setbits(le32, addr, set) +#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) + +#define clrbits_be16(addr, clear) clrbits(be16, addr, clear) +#define setbits_be16(addr, set) setbits(be16, addr, set) +#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set) + +#define clrbits_le16(addr, clear) clrbits(le16, addr, clear) +#define setbits_le16(addr, set) setbits(le16, addr, set) +#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set) + +#define clrbits_8(addr, clear) clrbits(8, addr, clear) +#define setbits_8(addr, set) setbits(8, addr, set) +#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) + +/* + * Now, pick up the machine-defined IO definitions + */ +#if 0 /* XXX###XXX */ +#include +#endif /* XXX###XXX */ + +/* + * IO port access primitives + * ------------------------- + * + * The ARM doesn't have special IO access instructions; all IO is memory + * mapped. Note that these are defined to perform little endian accesses + * only. Their primary purpose is to access PCI and ISA peripherals. + * + * Note that for a big endian machine, this implies that the following + * big endian mode connectivity is in place, as described by numerous + * ARM documents: + * + * PCI: D0-D7 D8-D15 D16-D23 D24-D31 + * ARM: D24-D31 D16-D23 D8-D15 D0-D7 + * + * The machine specific io.h include defines __io to translate an "IO" + * address to a memory address. + * + * Note that we prevent GCC re-ordering or caching values in expressions + * by introducing sequence points into the in*() definitions. 
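To make the clrbits/setbits/clrsetbits helpers above concrete, a small sketch of a read-modify-write on a little-endian register; the register address and field layout are assumptions made only for this example.

#define EXAMPLE_CLK_REG		((void *)0x10020000)	/* assumed address */
#define EXAMPLE_CLK_DIV_MASK	(0xf << 4)		/* assumed field */
#define EXAMPLE_CLK_DIV(x)	(((x) & 0xf) << 4)
#define EXAMPLE_CLK_ENABLE	(1 << 0)

static void example_set_divider(unsigned int div)
{
	/* Replace the divider field in one shot: clear the mask, set the new value. */
	clrsetbits_le32(EXAMPLE_CLK_REG, EXAMPLE_CLK_DIV_MASK, EXAMPLE_CLK_DIV(div));

	/* Then set the enable bit without disturbing other bits. */
	setbits_le32(EXAMPLE_CLK_REG, EXAMPLE_CLK_ENABLE);
}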
Note that + * __raw_* do not guarantee this behaviour. + * + * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. + */ +#ifdef __io +#define outb(v,p) __raw_writeb(v,__io(p)) +#define outw(v,p) __raw_writew(cpu_to_le16(v),__io(p)) +#define outl(v,p) __raw_writel(cpu_to_le32(v),__io(p)) + +#define inb(p) ({ unsigned int __v = __raw_readb(__io(p)); __v; }) +#define inw(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; }) +#define inl(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; }) + +#define outsb(p,d,l) __raw_writesb(__io(p),d,l) +#define outsw(p,d,l) __raw_writesw(__io(p),d,l) +#define outsl(p,d,l) __raw_writesl(__io(p),d,l) + +#define insb(p,d,l) __raw_readsb(__io(p),d,l) +#define insw(p,d,l) __raw_readsw(__io(p),d,l) +#define insl(p,d,l) __raw_readsl(__io(p),d,l) +#endif + +#define outb_p(val,port) outb((val),(port)) +#define outw_p(val,port) outw((val),(port)) +#define outl_p(val,port) outl((val),(port)) +#define inb_p(port) inb((port)) +#define inw_p(port) inw((port)) +#define inl_p(port) inl((port)) + +#define outsb_p(port,from,len) outsb(port,from,len) +#define outsw_p(port,from,len) outsw(port,from,len) +#define outsl_p(port,from,len) outsl(port,from,len) +#define insb_p(port,to,len) insb(port,to,len) +#define insw_p(port,to,len) insw(port,to,len) +#define insl_p(port,to,len) insl(port,to,len) + +/* + * ioremap and friends. + * + * ioremap takes a PCI memory address, as specified in + * linux/Documentation/IO-mapping.txt. If you want a + * physical address, use __ioremap instead. + */ +extern void * __ioremap(unsigned long offset, size_t size, unsigned long flags); +extern void __iounmap(void *addr); + +/* + * Generic ioremap support. + * + * Define: + * iomem_valid_addr(off,size) + * iomem_to_phys(off) + */ +#ifdef iomem_valid_addr +#define __arch_ioremap(off,sz,nocache) \ + ({ \ + unsigned long _off = (off), _size = (sz); \ + void *_ret = (void *)0; \ + if (iomem_valid_addr(_off, _size)) \ + _ret = __ioremap(iomem_to_phys(_off),_size,nocache); \ + _ret; \ + }) + +#define __arch_iounmap __iounmap +#endif + +/* + * String version of IO memory access ops: + */ +extern void _memcpy_fromio(void *, unsigned long, size_t); +extern void _memcpy_toio(unsigned long, const void *, size_t); +extern void _memset_io(unsigned long, int, size_t); + +extern void __readwrite_bug(const char *fn); + +/* + * If this architecture has PCI memory IO, then define the read/write + * macros. These should only be used with the cookie passed from + * ioremap. 
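The port-style inb/outb macros above are compiled only when a machine-specific header defines __io() to translate a port number into a memory address, as the comment notes. The following is a hypothetical sketch of such a mapping; the window base and the registers accessed are invented, and the #define would normally live in the platform's io header rather than here.

/* Hypothetical platform mapping of x86-style IO ports onto an MMIO window. */
#define EXAMPLE_PCI_IO_BASE	0xe0000000UL		/* assumed window base */
#define __io(p)			(EXAMPLE_PCI_IO_BASE + (p))

static unsigned char example_port_read(unsigned int port)
{
	/* With __io in place, outb()/inb() expand to little-endian MMIO. */
	outb(0x80, port);	/* illustrative write to an assumed index register */
	return inb(port + 1);	/* illustrative read of an assumed data register */
}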
+ */ +#ifdef __mem_pci + +#define readb(c) ({ unsigned int __v = __raw_readb(__mem_pci(c)); __v; }) +#define readw(c) ({ unsigned int __v = le16_to_cpu(__raw_readw(__mem_pci(c))); __v; }) +#define readl(c) ({ unsigned int __v = le32_to_cpu(__raw_readl(__mem_pci(c))); __v; }) + +#define writeb(v,c) __raw_writeb(v,__mem_pci(c)) +#define writew(v,c) __raw_writew(cpu_to_le16(v),__mem_pci(c)) +#define writel(v,c) __raw_writel(cpu_to_le32(v),__mem_pci(c)) + +#define memset_io(c,v,l) _memset_io(__mem_pci(c),(v),(l)) +#define memcpy_fromio(a,c,l) _memcpy_fromio((a),__mem_pci(c),(l)) +#define memcpy_toio(c,a,l) _memcpy_toio(__mem_pci(c),(a),(l)) + +#define eth_io_copy_and_sum(s,c,l,b) \ + eth_copy_and_sum((s),__mem_pci(c),(l),(b)) + +static inline int +check_signature(unsigned long io_addr, const unsigned char *signature, + int length) +{ + int retval = 0; + do { + if (readb(io_addr) != *signature) + goto out; + io_addr++; + signature++; + length--; + } while (length); + retval = 1; +out: + return retval; +} + +#elif !defined(readb) + +#define readb(addr) (__readwrite_bug("readb"),0) +#define readw(addr) (__readwrite_bug("readw"),0) +#define readl(addr) (__readwrite_bug("readl"),0) +#define writeb(v,addr) __readwrite_bug("writeb") +#define writew(v,addr) __readwrite_bug("writew") +#define writel(v,addr) __readwrite_bug("writel") + +#define eth_io_copy_and_sum(a,b,c,d) __readwrite_bug("eth_io_copy_and_sum") + +#define check_signature(io,sig,len) (0) + +#endif /* __mem_pci */ + +/* FIXME(dhendrix): added to make uart8250_mem code happy. Note: lL */ +static inline __attribute__((always_inline)) uint8_t read8(unsigned long addr) +{ + return readb(addr); +} + +static inline __attribute__((always_inline)) uint16_t read16(unsigned long addr) +{ + return readw(addr); +} + +static inline __attribute__((always_inline)) uint32_t read32(unsigned long addr) +{ + return readl(addr); +} + +static inline __attribute__((always_inline)) void write8(unsigned long addr, uint8_t value) +{ + writeb(value, addr); +} + +static inline __attribute__((always_inline)) void write16(unsigned long addr, uint16_t value) +{ + writew(value, addr); +} + +static inline __attribute__((always_inline)) void write32(unsigned long addr, uint32_t value) +{ + writel(value, addr); +} + + +/* + * If this architecture has ISA IO, then define the isa_read/isa_write + * macros. 
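The read8()/write8() wrappers above were added so the generic uart8250_mem code can access byte-wide registers. As a hedged sketch of that pattern: the base address below is invented and the offsets assume the conventional 8250 layout with a 4-byte register stride, neither of which is specified by this patch.

#define EXAMPLE_UART_BASE	0x12c30000UL	/* assumed UART MMIO base */
#define EXAMPLE_UART_THR	0x00		/* transmit holding register */
#define EXAMPLE_UART_LSR	0x14		/* line status, 4-byte stride */
#define EXAMPLE_UART_LSR_THRE	0x20		/* THR empty */

static void example_uart_tx_byte(uint8_t c)
{
	/* Wait for the transmitter to drain, then write one byte. */
	while (!(read8(EXAMPLE_UART_BASE + EXAMPLE_UART_LSR) & EXAMPLE_UART_LSR_THRE))
		;
	write8(EXAMPLE_UART_BASE + EXAMPLE_UART_THR, c);
}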
+ */ +#ifdef __mem_isa + +#define isa_readb(addr) __raw_readb(__mem_isa(addr)) +#define isa_readw(addr) __raw_readw(__mem_isa(addr)) +#define isa_readl(addr) __raw_readl(__mem_isa(addr)) +#define isa_writeb(val,addr) __raw_writeb(val,__mem_isa(addr)) +#define isa_writew(val,addr) __raw_writew(val,__mem_isa(addr)) +#define isa_writel(val,addr) __raw_writel(val,__mem_isa(addr)) +#define isa_memset_io(a,b,c) _memset_io(__mem_isa(a),(b),(c)) +#define isa_memcpy_fromio(a,b,c) _memcpy_fromio((a),__mem_isa(b),(c)) +#define isa_memcpy_toio(a,b,c) _memcpy_toio(__mem_isa((a)),(b),(c)) + +#define isa_eth_io_copy_and_sum(a,b,c,d) \ + eth_copy_and_sum((a),__mem_isa(b),(c),(d)) + +static inline int +isa_check_signature(unsigned long io_addr, const unsigned char *signature, + int length) +{ + int retval = 0; + do { + if (isa_readb(io_addr) != *signature) + goto out; + io_addr++; + signature++; + length--; + } while (length); + retval = 1; +out: + return retval; +} + +#else /* __mem_isa */ + +#define isa_readb(addr) (__readwrite_bug("isa_readb"),0) +#define isa_readw(addr) (__readwrite_bug("isa_readw"),0) +#define isa_readl(addr) (__readwrite_bug("isa_readl"),0) +#define isa_writeb(val,addr) __readwrite_bug("isa_writeb") +#define isa_writew(val,addr) __readwrite_bug("isa_writew") +#define isa_writel(val,addr) __readwrite_bug("isa_writel") +#define isa_memset_io(a,b,c) __readwrite_bug("isa_memset_io") +#define isa_memcpy_fromio(a,b,c) __readwrite_bug("isa_memcpy_fromio") +#define isa_memcpy_toio(a,b,c) __readwrite_bug("isa_memcpy_toio") + +#define isa_eth_io_copy_and_sum(a,b,c,d) \ + __readwrite_bug("isa_eth_io_copy_and_sum") + +#define isa_check_signature(io,sig,len) (0) + +#endif /* __mem_isa */ +#endif /* __KERNEL__ */ +#endif /* __ASM_ARM_IO_H */ diff --git a/src/arch/armv7/include/arch/pci_ops.h b/src/arch/armv7/include/arch/pci_ops.h new file mode 100644 index 0000000000..eca939045f --- /dev/null +++ b/src/arch/armv7/include/arch/pci_ops.h @@ -0,0 +1,19 @@ +#ifndef ARCH_I386_PCI_OPS_H +#define ARCH_I386_PCI_OPS_H + +extern const struct pci_bus_operations pci_cf8_conf1; + +#if CONFIG_MMCONF_SUPPORT +extern const struct pci_bus_operations pci_ops_mmconf; +#endif + +static inline const struct pci_bus_operations *pci_config_default(void) +{ + return &pci_cf8_conf1; +} + +static inline void pci_set_method(device_t dev) +{ + dev->ops->ops_pci_bus = pci_config_default(); +} +#endif /* ARCH_I386_PCI_OPS_H */ diff --git a/src/arch/armv7/include/arch/types.h b/src/arch/armv7/include/arch/types.h new file mode 100644 index 0000000000..71dc049da6 --- /dev/null +++ b/src/arch/armv7/include/arch/types.h @@ -0,0 +1,53 @@ +#ifndef __ASM_ARM_TYPES_H +#define __ASM_ARM_TYPES_H + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) +__extension__ typedef __signed__ long long __s64; +__extension__ typedef unsigned long long __u64; +#endif + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; + +#define BITS_PER_LONG 32 + +/* Dma addresses are 32-bits wide. */ + +typedef u32 dma_addr_t; + +typedef unsigned long phys_addr_t; +typedef unsigned long phys_size_t; + +#endif /* __KERNEL__ */ + +#endif diff --git a/src/arch/armv7/include/armv7.h b/src/arch/armv7/include/armv7.h new file mode 100644 index 0000000000..dc111c17ee --- /dev/null +++ b/src/arch/armv7/include/armv7.h @@ -0,0 +1,76 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, + * Aneesh V + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#ifndef ARMV7_H +#define ARMV7_H +#include + +/* Cortex-A9 revisions */ +#define MIDR_CORTEX_A9_R0P1 0x410FC091 +#define MIDR_CORTEX_A9_R1P2 0x411FC092 +#define MIDR_CORTEX_A9_R1P3 0x411FC093 +#define MIDR_CORTEX_A9_R2P10 0x412FC09A + +/* Cortex-A15 revisions */ +#define MIDR_CORTEX_A15_R0P0 0x410FC0F0 + +/* CCSIDR */ +#define CCSIDR_LINE_SIZE_OFFSET 0 +#define CCSIDR_LINE_SIZE_MASK 0x7 +#define CCSIDR_ASSOCIATIVITY_OFFSET 3 +#define CCSIDR_ASSOCIATIVITY_MASK (0x3FF << 3) +#define CCSIDR_NUM_SETS_OFFSET 13 +#define CCSIDR_NUM_SETS_MASK (0x7FFF << 13) + +/* + * Values for InD field in CSSELR + * Selects the type of cache + */ +#define ARMV7_CSSELR_IND_DATA_UNIFIED 0 +#define ARMV7_CSSELR_IND_INSTRUCTION 1 + +/* Values for Ctype fields in CLIDR */ +#define ARMV7_CLIDR_CTYPE_NO_CACHE 0 +#define ARMV7_CLIDR_CTYPE_INSTRUCTION_ONLY 1 +#define ARMV7_CLIDR_CTYPE_DATA_ONLY 2 +#define ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA 3 +#define ARMV7_CLIDR_CTYPE_UNIFIED 4 + +/* + * CP15 Barrier instructions + * Please note that we have separate barrier instructions in ARMv7 + * However, we use the CP15 based instructtions because we use + * -march=armv5 in U-Boot + */ +#define CP15ISB asm volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0)) +#define CP15DSB asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)) +#define CP15DMB asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0)) + +void v7_outer_cache_enable(void); +void v7_outer_cache_disable(void); +void v7_outer_cache_flush_all(void); +void v7_outer_cache_inval_all(void); +void v7_outer_cache_flush_range(u32 start, u32 end); +void v7_outer_cache_inval_range(u32 start, u32 end); + +#endif diff --git a/src/arch/armv7/include/assembler.h b/src/arch/armv7/include/assembler.h new file mode 100644 index 0000000000..5e4789b145 --- /dev/null +++ b/src/arch/armv7/include/assembler.h @@ -0,0 +1,60 @@ +/* + * arch/arm/include/asm/assembler.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains arm architecture specific defines + * for the different processors. + * + * Do not include any C declarations in this file - it is included by + * assembler source. + */ + +/* + * Endian independent macros for shifting bytes within registers. + */ +#ifndef __ARMEB__ +#define pull lsr +#define push lsl +#define get_byte_0 lsl #0 +#define get_byte_1 lsr #8 +#define get_byte_2 lsr #16 +#define get_byte_3 lsr #24 +#define put_byte_0 lsl #0 +#define put_byte_1 lsl #8 +#define put_byte_2 lsl #16 +#define put_byte_3 lsl #24 +#else +#define pull lsl +#define push lsr +#define get_byte_0 lsr #24 +#define get_byte_1 lsr #16 +#define get_byte_2 lsr #8 +#define get_byte_3 lsl #0 +#define put_byte_0 lsl #24 +#define put_byte_1 lsl #16 +#define put_byte_2 lsl #8 +#define put_byte_3 lsl #0 +#endif + +/* + * Data preload for architectures that support it + */ +#if defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) || \ + defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ + defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) || \ + defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_7A__) || \ + defined(__ARM_ARCH_7R__) +#define PLD(code...) 
code +#else +#define PLD(code...) +#endif + +/* + * Cache alligned + */ +#define CALGN(code...) code diff --git a/src/arch/armv7/include/bootblock_common.h b/src/arch/armv7/include/bootblock_common.h new file mode 100644 index 0000000000..f5c712945d --- /dev/null +++ b/src/arch/armv7/include/bootblock_common.h @@ -0,0 +1,69 @@ +#include +#include +#include +#include + + +#define boot_cpu(x) 1 + +#ifdef CONFIG_BOOTBLOCK_CPU_INIT +#include CONFIG_BOOTBLOCK_CPU_INIT +#else +static void bootblock_cpu_init(void) { } +#endif +#ifdef CONFIG_BOOTBLOCK_NORTHBRIDGE_INIT +#include CONFIG_BOOTBLOCK_NORTHBRIDGE_INIT +#else +static void bootblock_northbridge_init(void) { } +#endif +#ifdef CONFIG_BOOTBLOCK_SOUTHBRIDGE_INIT +#include CONFIG_BOOTBLOCK_SOUTHBRIDGE_INIT +#else +static void bootblock_southbridge_init(void) { } +#endif + +static int cbfs_check_magic(struct cbfs_file *file) +{ + return !strcmp(file->magic, CBFS_FILE_MAGIC) ? 1 : 0; +} + +static unsigned long findstage(const char* target) +{ + unsigned long offset; + + void *ptr = (void *)*((unsigned long *) CBFS_HEADPTR_ADDR); + struct cbfs_header *header = (struct cbfs_header *) ptr; + // if (ntohl(header->magic) != CBFS_HEADER_MAGIC) + // printk(BIOS_ERR, "ERROR: No valid CBFS header found!\n"); + + offset = 0 - ntohl(header->romsize) + ntohl(header->offset); + int align = ntohl(header->align); + while(1) { + struct cbfs_file *file = (struct cbfs_file *) offset; + if (!cbfs_check_magic(file)) + return 0; + if (!strcmp(CBFS_NAME(file), target)) + return (unsigned long)CBFS_SUBHEADER(file); + int flen = ntohl(file->len); + int foffset = ntohl(file->offset); + unsigned long oldoffset = offset; + offset = ALIGN(offset + foffset + flen, align); + if (offset <= oldoffset) + return 0; + if (offset < 0xFFFFFFFF - ntohl(header->romsize)) + return 0; + } +} + + +static void call(unsigned long addr, unsigned long bist) +{ + asm volatile ("mov r0, %1\nbx %0\n" : : "r" (addr), "r" (bist)); +} + +static void hlt(void) +{ + /* is there such a thing as hlt on ARM? */ + // asm volatile ("1:\n\thlt\n\tjmp 1b\n\t"); + asm volatile ("1:\nb 1b\n\t"); +} diff --git a/src/arch/armv7/include/cache.h b/src/arch/armv7/include/cache.h new file mode 100644 index 0000000000..cf8fb5ad61 --- /dev/null +++ b/src/arch/armv7/include/cache.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2009 + * Marvell Semiconductor + * Written-by: Prafulla Wadaskar + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
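As an illustration of how the CCSIDR field definitions in armv7.h above are used, here is a hedged sketch that decodes the geometry of the cache level currently selected in CSSELR. The mrc encoding follows the ARMv7 architecture manual; the helper itself is not part of this patch.

/* Decode line size, ways and sets of the currently selected cache. */
static void example_read_cache_geometry(unsigned int *line_size_bytes,
					unsigned int *ways,
					unsigned int *sets)
{
	unsigned int ccsidr;

	/* Read the Cache Size ID Register (CCSIDR). */
	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));

	/* LineSize encodes log2(words per line) - 2, i.e. bytes = 1 << (field + 4). */
	*line_size_bytes = 1 << (((ccsidr >> CCSIDR_LINE_SIZE_OFFSET) &
				  CCSIDR_LINE_SIZE_MASK) + 4);
	*ways = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >>
		 CCSIDR_ASSOCIATIVITY_OFFSET) + 1;
	*sets = ((ccsidr & CCSIDR_NUM_SETS_MASK) >> CCSIDR_NUM_SETS_OFFSET) + 1;
}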
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA + */ + +#ifndef _ASM_CACHE_H +#define _ASM_CACHE_H + +/* + * Invalidate L2 Cache using co-proc instruction + */ +static inline void invalidate_l2_cache(void) +{ + unsigned int val=0; + + asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache" + : : "r" (val) : "cc"); + isb(); +} + +void l2_cache_enable(void); +void l2_cache_disable(void); + +/* + * The current upper bound for ARM L1 data cache line sizes is 64 bytes. We + * use that value for aligning DMA buffers unless the board config has specified + * an alternate cache line size. + */ +#ifdef CONFIG_SYS_CACHELINE_SIZE +#define ARCH_DMA_MINALIGN CONFIG_SYS_CACHELINE_SIZE +#else +#define ARCH_DMA_MINALIGN 64 +#endif + +inline void dram_bank_mmu_setup(unsigned long start, unsigned long size); + +#endif /* _ASM_CACHE_H */ diff --git a/src/arch/armv7/include/clocks.h b/src/arch/armv7/include/clocks.h new file mode 100644 index 0000000000..8f35303fd3 --- /dev/null +++ b/src/arch/armv7/include/clocks.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2011 The Chromium OS Authors. + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* Standard clock speeds */ + +/* + * We define some commonly-used clock speeds to avoid error since long + * numbers are hard to read. + * + * The format of the label is + * CLK_x_yU where: + * x is the integer speed + * y is the fractional part which can be omitted if 0 + * U is the units (blank for Hz, K or M for KHz and MHz) + * + * Please order the items by increasing Hz + */ +enum { + CLK_32768 = 32768, + CLK_20M = 20000000, + CLK_24M = 24000000, + CLK_144M = 144000000, + CLK_216M = 216000000, + CLK_300M = 300000000, +}; + diff --git a/src/arch/armv7/include/common.h b/src/arch/armv7/include/common.h new file mode 100644 index 0000000000..e21cfb0239 --- /dev/null +++ b/src/arch/armv7/include/common.h @@ -0,0 +1,494 @@ +/* + * (C) Copyright 2000-2009 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
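A short sketch of the intended use of ARCH_DMA_MINALIGN from cache.h above: aligning a DMA buffer to the cache line size so that flushes and invalidates do not touch unrelated data. The buffer size and name are illustrative; flush_dcache_range() is declared in common.h elsewhere in this patch.

/* Cache-line aligned buffer suitable for DMA. */
static unsigned char example_dma_buf[512]
	__attribute__((aligned(ARCH_DMA_MINALIGN)));

static void example_prepare_for_dma(void)
{
	/* Flush so the device observes what the CPU has written. */
	flush_dcache_range((unsigned long)example_dma_buf,
			   (unsigned long)example_dma_buf + sizeof(example_dma_buf));
}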
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __COMMON_H_ +#define __COMMON_H_ 1 + +#undef _LINUX_CONFIG_H +#define _LINUX_CONFIG_H 1 /* avoid reading Linux autoconf.h file */ + +#ifndef __ASSEMBLER__ /* put C only stuff in this section */ + +typedef unsigned char uchar; +typedef volatile unsigned long vu_long; +typedef volatile unsigned short vu_short; +typedef volatile unsigned char vu_char; +typedef unsigned long ulong; +typedef unsigned int uint; + +//#include +//#include +//#include +//#include +//#include +#include +//#include + +//#include +//#include +//#include + +#ifdef DEBUG +#define debug(fmt,args...) printf (fmt ,##args) +#define debugX(level,fmt,args...) if (DEBUG>=level) printf(fmt,##args); +#else +#define debug(fmt,args...) +#define debugX(level,fmt,args...) +#endif /* DEBUG */ + +#ifdef DEBUG +# define _DEBUG 1 +#else +# define _DEBUG 0 +#endif + +/* + * An assertion is run-time check done in debug mode only. If DEBUG is not + * defined then it is skipped. If DEBUG is defined and the assertion fails, + * then it calls panic*( which may or may not reset/halt U-Boot (see + * CONFIG_PANIC_HANG), It is hoped that all failing assertions are found + * before release, and after release it is hoped that they don't matter. But + * in any case these failing assertions cannot be fixed with a reset (which + * may just do the same assertion again). + */ +void __assert_fail(const char *assertion, const char *file, unsigned line, + const char *function); +#define assert(x) \ + ({ if (!(x) && _DEBUG) \ + __assert_fail(#x, __FILE__, __LINE__, __func__); }) + +#define error(fmt, args...) do { \ + printf("ERROR: " fmt "\nat %s:%d/%s()\n", \ + ##args, __FILE__, __LINE__, __func__); \ +} while (0) + +#ifndef BUG +#define BUG() do { \ + printf("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ + panic("BUG!"); \ +} while (0) +#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) +#endif /* BUG */ + +/* Force a compilation error if condition is true */ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + +typedef void (interrupt_handler_t)(void *); + +//#include /* boot information for Linux kernel */ +#include /* global data used for startup functions */ + +/* + * Return the time since boot in microseconds, This is needed for bootstage + * and should be defined in CPU- or board-specific code. If undefined then + * millisecond resolution will be used (the standard get_timer()). + */ +ulong timer_get_boot_us(void); + +/* + * Return the current value of a monotonically increasing microsecond timer. + * Granularity may be larger than 1us if hardware does not support this. + */ +ulong timer_get_us(void); + +/* + * General Purpose Utilities + */ +#if 0 +#define min(X, Y) \ + ({ typeof (X) __x = (X); \ + typeof (Y) __y = (Y); \ + (__x < __y) ? __x : __y; }) + +#define max(X, Y) \ + ({ typeof (X) __x = (X); \ + typeof (Y) __y = (Y); \ + (__x > __y) ? __x : __y; }) +#define MIN(x, y) min(x, y) +#define MAX(x, y) max(x, y) +#endif + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
+ * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +/* + * Function Prototypes + */ + +void hang (void) __attribute__ ((noreturn)); + +int init_timer(void); /* FIXME(dhendrix): used to be timer_init() */ +int cpu_init(void); + +/* */ +unsigned long long initdram (int); +int display_options (void); +void print_size(unsigned long long, const char *); +int print_buffer (ulong addr, void* data, uint width, uint count, uint linelen); + +/* common/main.c */ +void main_loop (void); +int run_command (const char *cmd, int flag); +int readline (const char *const prompt); +int readline_into_buffer (const char *const prompt, char * buffer); +int parse_line (char *, char *[]); +void init_cmd_timeout(void); +void reset_cmd_timeout(void); + +/* arch/$(ARCH)/lib/board.c */ +void board_init_f (ulong); +//void board_init_f (ulong) __attribute__ ((noreturn)); +void board_init_r (gd_t *, ulong) __attribute__ ((noreturn)); +int checkboard (void); +int checkflash (void); +int checkdram (void); +int last_stage_init(void); +extern ulong monitor_flash_len; +int mac_read_from_eeprom(void); + +#ifdef CONFIG_ARM +# include +# include +# include /* ARM version to be fixed! */ +#endif /* CONFIG_ARM */ + +int misc_init_f (void); +int misc_init_r (void); + +/* common/exports.c */ +void jumptable_init(void); + +/* common/kallsysm.c */ +const char *symbol_lookup(unsigned long addr, unsigned long *caddr); + +/* api/api.c */ +void api_init (void); + +/* common/memsize.c */ +long get_ram_size (long *, long); + +/* $(BOARD)/$(BOARD).c */ +void reset_phy (void); +void fdc_hw_init (void); + +/* $(BOARD)/eeprom.c */ +void eeprom_init (void); +#ifndef CONFIG_SPI +int eeprom_probe (unsigned dev_addr, unsigned offset); +#endif +int eeprom_read (unsigned dev_addr, unsigned offset, uchar *buffer, unsigned cnt); +int eeprom_write (unsigned dev_addr, unsigned offset, uchar *buffer, unsigned cnt); + +/* + * Set this up regardless of board + * type, to prevent errors. 
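To make container_of() above concrete, a brief hypothetical example; both structures are invented for the sketch.

struct example_device {
	int id;
};

struct example_uart {
	unsigned long base;
	struct example_device dev;	/* embedded member */
};

static struct example_uart *to_example_uart(struct example_device *dev)
{
	/* Recover the enclosing structure from a pointer to its member. */
	return container_of(dev, struct example_uart, dev);
}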
+ */ +#if defined(CONFIG_SPI) || !defined(CONFIG_SYS_I2C_EEPROM_ADDR) +# define CONFIG_SYS_DEF_EEPROM_ADDR 0 +#else +#if !defined(CONFIG_ENV_EEPROM_IS_ON_I2C) +# define CONFIG_SYS_DEF_EEPROM_ADDR CONFIG_SYS_I2C_EEPROM_ADDR +#endif +#endif /* CONFIG_SPI || !defined(CONFIG_SYS_I2C_EEPROM_ADDR) */ + +#if defined(CONFIG_SPI) +extern void spi_init_f (void); +extern void spi_init_r (void); +extern ssize_t spi_read (uchar *, int, uchar *, int); +extern ssize_t spi_write (uchar *, int, uchar *, int); +#endif + +/* $(BOARD)/$(BOARD).c */ +int board_early_init_f (void); +int board_late_init (void); +int board_postclk_init (void); /* after clocks/timebase, before env/serial */ +int board_early_init_r (void); +void board_poweroff (void); + +#if defined(CONFIG_SYS_DRAM_TEST) +int testdram(void); +#endif /* CONFIG_SYS_DRAM_TEST */ + +/* $(CPU)/start.S */ +#if defined(CONFIG_5xx) || \ + defined(CONFIG_8xx) +uint get_immr (uint); +#endif +uint get_pir (void); +#if defined(CONFIG_MPC5xxx) +uint get_svr (void); +#endif +uint get_pvr (void); +uint get_svr (void); +uint rd_ic_cst (void); +void wr_ic_cst (uint); +void wr_ic_adr (uint); +uint rd_dc_cst (void); +void wr_dc_cst (uint); +void wr_dc_adr (uint); +int icache_status (void); +void icache_enable (void); +void icache_disable(void); +int dcache_status (void); +void dcache_enable (void); +void dcache_disable(void); +void mmu_disable(void); +void relocate_code (ulong, gd_t *, ulong) __attribute__ ((noreturn)); +ulong get_endaddr (void); +void trap_init (ulong); +#if defined (CONFIG_4xx) || \ + defined (CONFIG_MPC5xxx) || \ + defined (CONFIG_74xx_7xx) || \ + defined (CONFIG_74x) || \ + defined (CONFIG_75x) || \ + defined (CONFIG_74xx) || \ + defined (CONFIG_MPC8220) || \ + defined (CONFIG_MPC85xx) || \ + defined (CONFIG_MPC86xx) || \ + defined (CONFIG_MPC83xx) +unsigned char in8(unsigned int); +void out8(unsigned int, unsigned char); +unsigned short in16(unsigned int); +unsigned short in16r(unsigned int); +void out16(unsigned int, unsigned short value); +void out16r(unsigned int, unsigned short value); +unsigned long in32(unsigned int); +unsigned long in32r(unsigned int); +void out32(unsigned int, unsigned long value); +void out32r(unsigned int, unsigned long value); +void ppcDcbf(unsigned long value); +void ppcDcbi(unsigned long value); +void ppcSync(void); +void ppcDcbz(unsigned long value); +#endif + +/* $(CPU)/cpu.c */ +static inline int cpumask_next(int cpu, unsigned int mask) +{ + for (cpu++; !((1 << cpu) & mask); cpu++) + ; + + return cpu; +} + +#define for_each_cpu(iter, cpu, num_cpus, mask) \ + for (iter = 0, cpu = cpumask_next(-1, mask); \ + iter < num_cpus; \ + iter++, cpu = cpumask_next(cpu, mask)) \ + +int cpu_numcores (void); +u32 cpu_mask (void); +int is_core_valid (unsigned int); +int probecpu (void); +int checkcpu (void); +int checkicache (void); +int checkdcache (void); +void upmconfig (unsigned int, unsigned int *, unsigned int); +ulong get_tbclk (void); +void reset_cpu (ulong addr); +#if defined (CONFIG_OF_LIBFDT) && defined (CONFIG_OF_BOARD_SETUP) +void ft_cpu_setup(void *blob, bd_t *bd); +#endif + + +/* $(CPU)/serial.c */ +int serial_init (void); +void serial_setbrg (void); +void serial_putc (const char); +void serial_putc_raw(const char); +void serial_puts (const char *); +int serial_getc (void); +int serial_tstc (void); + +void _serial_setbrg (const int); +void _serial_putc (const char, const int); +void _serial_putc_raw(const char, const int); +void _serial_puts (const char *, const int); +int _serial_getc (const int); +int 
_serial_tstc (const int); + +/* $(CPU)/speed.c */ +int get_clocks (void); +int get_clocks_866 (void); +int sdram_adjust_866 (void); +int adjust_sdram_tbs_8xx (void); +#if defined(CONFIG_8260) +int prt_8260_clks (void); +#elif defined(CONFIG_MPC5xxx) +int prt_mpc5xxx_clks (void); +#endif +#if defined(CONFIG_MPC512X) +int prt_mpc512xxx_clks (void); +#endif +#if defined(CONFIG_MPC8220) +int prt_mpc8220_clks (void); +#endif +#ifdef CONFIG_4xx +ulong get_OPB_freq (void); +ulong get_PCI_freq (void); +#endif +#if defined(CONFIG_S3C24X0) || \ + defined(CONFIG_LH7A40X) || \ + defined(CONFIG_S3C6400) || \ + defined(CONFIG_EP93XX) +ulong get_FCLK (void); +ulong get_HCLK (void); +ulong get_PCLK (void); +ulong get_UCLK (void); +#endif +#if defined(CONFIG_LH7A40X) +ulong get_PLLCLK (void); +#endif +#if defined CONFIG_INCA_IP +uint incaip_get_cpuclk (void); +#endif +#if defined(CONFIG_IMX) +ulong get_systemPLLCLK(void); +ulong get_FCLK(void); +ulong get_HCLK(void); +ulong get_BCLK(void); +ulong get_PERCLK1(void); +ulong get_PERCLK2(void); +ulong get_PERCLK3(void); +#endif +ulong get_bus_freq (ulong); +int get_serial_clock(void); + +struct pt_regs; +/* $(CPU)/interrupts.c */ +int interrupt_init (void); +void timer_interrupt (struct pt_regs *); +void external_interrupt (struct pt_regs *); +void irq_install_handler(int, interrupt_handler_t *, void *); +void irq_free_handler (int); +void reset_timer (void); +ulong get_timer (ulong base); +void enable_interrupts (void); +int disable_interrupts (void); + +/* $(CPU)/.../commproc.c */ +int dpram_init (void); +uint dpram_base(void); +uint dpram_base_align(uint align); +uint dpram_alloc(uint size); +uint dpram_alloc_align(uint size,uint align); +void bootcount_store (ulong); +ulong bootcount_load (void); +#define BOOTCOUNT_MAGIC 0xB001C041 + +/* $(CPU)/.../ */ +void mii_init (void); + +/* $(CPU)/.../lcd.c */ +ulong lcd_setmem (ulong); + +/* $(CPU)/.../video.c */ +ulong video_setmem (ulong); + +/* arch/$(ARCH)/lib/cache.c */ +ulong dcache_get_line_size(void); +void enable_caches(void); +void flush_cache (unsigned long, unsigned long); +void flush_dcache_all(void); +void flush_dcache_range(unsigned long start, unsigned long stop); +void invalidate_dcache_range(unsigned long start, unsigned long stop); +void invalidate_dcache_all(void); +void invalidate_icache_all(void); + +/* arch/$(ARCH)/lib/ticks.S */ +unsigned long long get_ticks(void); +void wait_ticks (unsigned long); + +/* arch/$(ARCH)/lib/time.c */ +void __udelay (unsigned long); +ulong usec2ticks (unsigned long usec); +ulong ticks2usec (unsigned long ticks); +int init_timebase (void); + +/* lib/qsort.c */ +void qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)); +int strcmp_compar(const void *, const void *); + +/* lib/time.c */ +void udelay (unsigned long); + +#if 0 +/* lib/vsprintf.c */ +ulong simple_strtoul(const char *cp,char **endp,unsigned int base); +int strict_strtoul(const char *cp, unsigned int base, unsigned long *res); +unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base); +long simple_strtol(const char *cp,char **endp,unsigned int base); +void panic(const char *fmt, ...) + __attribute__ ((format (__printf__, 1, 2), noreturn)); +int sprintf(char * buf, const char *fmt, ...) 
+ __attribute__ ((format (__printf__, 2, 3))); +int vsprintf(char *buf, const char *fmt, va_list args); +#endif + +/* lib/strmhz.c */ +char * strmhz(char *buf, unsigned long hz); + +/* Multicore arch functions */ +#ifdef CONFIG_MP +int cpu_status(int nr); +int cpu_reset(int nr); +int cpu_disable(int nr); +int cpu_release(int nr, int argc, char * const argv[]); +#endif + +#endif /* __ASSEMBLER__ */ + +/* Put only stuff here that the assembler can digest */ + +#ifdef CONFIG_POST +#define CONFIG_HAS_POST +#ifndef CONFIG_POST_ALT_LIST +#define CONFIG_POST_STD_LIST +#endif +#endif + +#ifdef CONFIG_INIT_CRITICAL +#error CONFIG_INIT_CRITICAL is deprecated! +#error Read section CONFIG_SKIP_LOWLEVEL_INIT in README. +#endif + +//#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define ROUND(a,b) (((a) + (b) - 1) & ~((b) - 1)) +#define DIV_ROUND(n,d) (((n) + ((d)/2)) / (d)) +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) + +//#define ALIGN(x,a) __ALIGN_MASK((x),(typeof(x))(a)-1) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) + +#endif /* __COMMON_H_ */ diff --git a/src/arch/armv7/include/div64.h b/src/arch/armv7/include/div64.h new file mode 100644 index 0000000000..5baed2b294 --- /dev/null +++ b/src/arch/armv7/include/div64.h @@ -0,0 +1,233 @@ +/* taken from linux 2.6.31.14 */ + +#ifndef __ASM_ARM_DIV64 +#define __ASM_ARM_DIV64 + +//#include +//#include +// FIXME + +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" +#define __LINUX_ARM_ARCH__ 7 + +/* + * The semantics of do_div() are: + * + * uint32_t do_div(uint64_t *n, uint32_t base) + * { + * uint32_t remainder = *n % base; + * *n = *n / base; + * return remainder; + * } + * + * In other words, a 64-bit dividend with a 32-bit divisor producing + * a 64-bit result and a 32-bit remainder. To accomplish this optimally + * we call a special __do_div64 helper with completely non standard + * calling convention for arguments and results (beware). + */ + +#ifdef __ARMEB__ +#define __xh "r0" +#define __xl "r1" +#else +#define __xl "r0" +#define __xh "r1" +#endif + +#define __do_div_asm(n, base) \ +({ \ + register unsigned int __base asm("r4") = base; \ + register unsigned long long __n asm("r0") = n; \ + register unsigned long long __res asm("r2"); \ + register unsigned int __rem asm(__xh); \ + asm( __asmeq("%0", __xh) \ + __asmeq("%1", "r2") \ + __asmeq("%2", "r0") \ + __asmeq("%3", "r4") \ + "bl __do_div64" \ + : "=r" (__rem), "=r" (__res) \ + : "r" (__n), "r" (__base) \ + : "ip", "lr", "cc"); \ + n = __res; \ + __rem; \ +}) + +#if __GNUC__ < 4 + +/* + * gcc versions earlier than 4.0 are simply too problematic for the + * optimized implementation below. First there is gcc PR 15089 that + * tend to trig on more complex constructs, spurious .global __udivsi3 + * are inserted even if none of those symbols are referenced in the + * generated code, and those gcc versions are not able to do constant + * propagation on long long values anyway. + */ +#define do_div(n, base) __do_div_asm(n, base) + +#elif __GNUC__ >= 4 + +//#include + +/* + * If the divisor happens to be constant, we determine the appropriate + * inverse at compile time to turn the division into a few inline + * multiplications instead which is much faster. And yet only if compiling + * for ARMv4 or higher (we need umull/umlal) and if the gcc version is + * sufficiently recent to perform proper long long constant propagation. + * (It is unfortunate that gcc doesn't perform all this internally.) 
+ */ +#define do_div(n, base) \ +({ \ + unsigned int __r, __b = (base); \ + if (!__builtin_constant_p(__b) || __b == 0 || \ + (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \ + /* non-constant divisor (or zero): slow path */ \ + __r = __do_div_asm(n, __b); \ + } else if ((__b & (__b - 1)) == 0) { \ + /* Trivial: __b is constant and a power of 2 */ \ + /* gcc does the right thing with this code. */ \ + __r = n; \ + __r &= (__b - 1); \ + n /= __b; \ + } else { \ + /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \ + /* We rely on the fact that most of this code gets */ \ + /* optimized away at compile time due to constant */ \ + /* propagation and only a couple inline assembly */ \ + /* instructions should remain. Better avoid any */ \ + /* code construct that might prevent that. */ \ + unsigned long long __res, __x, __t, __m, __n = n; \ + unsigned int __c, __p, __z = 0; \ + /* preserve low part of n for reminder computation */ \ + __r = __n; \ + /* determine number of bits to represent __b */ \ + __p = 1 << __div64_fls(__b); \ + /* compute __m = ((__p << 64) + __b - 1) / __b */ \ + __m = (~0ULL / __b) * __p; \ + __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \ + /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \ + __x = ~0ULL / __b * __b - 1; \ + __res = (__m & 0xffffffff) * (__x & 0xffffffff); \ + __res >>= 32; \ + __res += (__m & 0xffffffff) * (__x >> 32); \ + __t = __res; \ + __res += (__x & 0xffffffff) * (__m >> 32); \ + __t = (__res < __t) ? (1ULL << 32) : 0; \ + __res = (__res >> 32) + __t; \ + __res += (__m >> 32) * (__x >> 32); \ + __res /= __p; \ + /* Now sanitize and optimize what we've got. */ \ + if (~0ULL % (__b / (__b & -__b)) == 0) { \ + /* those cases can be simplified with: */ \ + __n /= (__b & -__b); \ + __m = ~0ULL / (__b / (__b & -__b)); \ + __p = 1; \ + __c = 1; \ + } else if (__res != __x / __b) { \ + /* We can't get away without a correction */ \ + /* to compensate for bit truncation errors. */ \ + /* To avoid it we'd need an additional bit */ \ + /* to represent __m which would overflow it. */ \ + /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \ + __c = 1; \ + /* Compute __m = (__p << 64) / __b */ \ + __m = (~0ULL / __b) * __p; \ + __m += ((~0ULL % __b + 1) * __p) / __b; \ + } else { \ + /* Reduce __m/__p, and try to clear bit 31 */ \ + /* of __m when possible otherwise that'll */ \ + /* need extra overflow handling later. */ \ + unsigned int __bits = -(__m & -__m); \ + __bits |= __m >> 32; \ + __bits = (~__bits) << 1; \ + /* If __bits == 0 then setting bit 31 is */ \ + /* unavoidable. Simply apply the maximum */ \ + /* possible reduction in that case. */ \ + /* Otherwise the MSB of __bits indicates the */ \ + /* best reduction we should apply. */ \ + if (!__bits) { \ + __p /= (__m & -__m); \ + __m /= (__m & -__m); \ + } else { \ + __p >>= __div64_fls(__bits); \ + __m >>= __div64_fls(__bits); \ + } \ + /* No correction needed. */ \ + __c = 0; \ + } \ + /* Now we have a combination of 2 conditions: */ \ + /* 1) whether or not we need a correction (__c), and */ \ + /* 2) whether or not there might be an overflow in */ \ + /* the cross product (__m & ((1<<63) | (1<<31))) */ \ + /* Select the best insn combination to perform the */ \ + /* actual __m * __n / (__p << 64) operation. 
*/ \ + if (!__c) { \ + asm ( "umull %Q0, %R0, %1, %Q2\n\t" \ + "mov %Q0, #0" \ + : "=&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ + __res = __m; \ + asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \ + "mov %Q0, #0" \ + : "+&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else { \ + asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \ + "cmn %Q0, %Q1\n\t" \ + "adcs %R0, %R0, %R1\n\t" \ + "adc %Q0, %3, #0" \ + : "=&r" (__res) \ + : "r" (__m), "r" (__n), "r" (__z) \ + : "cc" ); \ + } \ + if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ + asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \ + "umlal %R0, %Q0, %Q1, %R2\n\t" \ + "mov %R0, #0\n\t" \ + "umlal %Q0, %R0, %R1, %R2" \ + : "+&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else { \ + asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \ + "umlal %R0, %1, %Q2, %R3\n\t" \ + "mov %R0, #0\n\t" \ + "adds %Q0, %1, %Q0\n\t" \ + "adc %R0, %R0, #0\n\t" \ + "umlal %Q0, %R0, %R2, %R3" \ + : "+&r" (__res), "+&r" (__z) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } \ + __res /= __p; \ + /* The reminder can be computed with 32-bit regs */ \ + /* only, and gcc is good at that. */ \ + { \ + unsigned int __res0 = __res; \ + unsigned int __b0 = __b; \ + __r -= __res0 * __b0; \ + } \ + /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \ + n = __res; \ + } \ + __r; \ +}) + +/* our own fls implementation to make sure constant propagation is fine */ +#define __div64_fls(bits) \ +({ \ + unsigned int __left = (bits), __nr = 0; \ + if (__left & 0xffff0000) __nr += 16, __left >>= 16; \ + if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \ + if (__left & 0x000000f0) __nr += 4, __left >>= 4; \ + if (__left & 0x0000000c) __nr += 2, __left >>= 2; \ + if (__left & 0x00000002) __nr += 1; \ + __nr; \ +}) + +#endif + +#endif diff --git a/src/arch/armv7/include/global_data.h b/src/arch/armv7/include/global_data.h new file mode 100644 index 0000000000..4ae86aa0e6 --- /dev/null +++ b/src/arch/armv7/include/global_data.h @@ -0,0 +1,108 @@ +/* + * (C) Copyright 2002-2010 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __ASM_GBL_DATA_H +#define __ASM_GBL_DATA_H + +/* + * The following data structure is placed in some memory which is + * available very early after boot (like DPRAM on MPC8xx/MPC82xx, or + * some locked parts of the data cache) to allow for a minimum set of + * global variables during system initialization (until we have set + * up the memory controller so that we can use RAM). 
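A usage sketch for the do_div() macro from div64.h above: as documented there, it divides a 64-bit dividend by a 32-bit divisor in place and evaluates to the 32-bit remainder. The tick-to-millisecond conversion is an invented example.

static unsigned long example_ticks_to_ms(unsigned long long ticks,
					 unsigned int ticks_per_sec)
{
	unsigned long long ms = ticks * 1000ULL;

	/* ms is updated in place; the macro's value is the remainder. */
	do_div(ms, ticks_per_sec);

	return (unsigned long)ms;
}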
+ * + * Keep it *SMALL* and remember to set GENERATED_GBL_DATA_SIZE > sizeof(gd_t) + */ + +typedef struct global_data { +// bd_t *bd; + unsigned long flags; + unsigned long baudrate; + unsigned long have_console; /* serial_init() was called */ +#ifdef CONFIG_PRE_CONSOLE_BUFFER + unsigned long precon_buf_idx; /* Pre-Console buffer index */ +#endif + unsigned long env_addr; /* Address of Environment struct */ + unsigned long env_valid; /* Checksum of Environment valid? */ + unsigned long fb_base; /* base address of frame buffer */ +#ifdef CONFIG_FSL_ESDHC + unsigned long sdhc_clk; +#endif +#ifdef CONFIG_AT91FAMILY + /* "static data" needed by at91's clock.c */ + unsigned long cpu_clk_rate_hz; + unsigned long main_clk_rate_hz; + unsigned long mck_rate_hz; + unsigned long plla_rate_hz; + unsigned long pllb_rate_hz; + unsigned long at91_pllb_usb_init; +#endif +#ifdef CONFIG_ARM + /* "static data" needed by most of timer.c on ARM platforms */ + unsigned long timer_rate_hz; + unsigned long tbl; + unsigned long tbu; + unsigned long long timer_reset_value; + unsigned long lastinc; +#endif +#ifdef CONFIG_IXP425 + unsigned long timestamp; +#endif + unsigned long relocaddr; /* Start address of U-Boot in RAM */ + unsigned long long ram_size; /* RAM size */ + unsigned long mon_len; /* monitor len */ + unsigned long irq_sp; /* irq stack pointer */ + unsigned long start_addr_sp; /* start_addr_stackpointer */ + unsigned long reloc_off; +#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) + unsigned long tlb_addr; + unsigned long tlb_size; +#endif + const void *fdt_blob; /* Our device tree, NULL if none */ +#ifdef CONFIG_SYS_SKIP_ARM_RELOCATION + ulong malloc_end; /* End of malloc region (addr + 1) */ +#endif + void **jt; /* jump table */ + char env_buf[32]; /* buffer for getenv() before reloc. */ +#if defined(CONFIG_POST) || defined(CONFIG_LOGBUFFER) + unsigned long post_log_word; /* Record POST activities */ + unsigned long post_log_res; /* success of POST test */ + unsigned long post_init_f_time; /* When post_init_f started */ +#endif +} gd_t; + +/* + * Global Data Flags + */ +#define GD_FLG_RELOC 0x00001 /* Code was relocated to RAM */ +#define GD_FLG_DEVINIT 0x00002 /* Devices have been initialized */ +#define GD_FLG_SILENT 0x00004 /* Silent mode */ +#define GD_FLG_POSTFAIL 0x00008 /* Critical POST test failed */ +#define GD_FLG_POSTSTOP 0x00010 /* POST seqeunce aborted */ +#define GD_FLG_LOGINIT 0x00020 /* Log Buffer has been initialized */ +#define GD_FLG_DISABLE_CONSOLE 0x00040 /* Disable console (in & out) */ +#define GD_FLG_ENV_READY 0x00080 /* Environment imported into hash table */ + +#define DECLARE_GLOBAL_DATA_PTR register volatile gd_t *gd asm ("r8") + +#endif /* __ASM_GBL_DATA_H */ diff --git a/src/arch/armv7/include/hang.h b/src/arch/armv7/include/hang.h new file mode 100644 index 0000000000..f57fc28f56 --- /dev/null +++ b/src/arch/armv7/include/hang.h @@ -0,0 +1,20 @@ + /* + * This file is part of the coreboot project. + * + * Copyright (C) 2012 The ChromiumOS Authors. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
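A short sketch of how the global data pointer above is used in this U-Boot-derived code: a file that needs it places DECLARE_GLOBAL_DATA_PTR at file scope and then dereferences gd. The console-readiness check is illustrative only.

DECLARE_GLOBAL_DATA_PTR;	/* binds gd to register r8 */

static int example_console_ready(void)
{
	/* Fields are only meaningful once early init has populated them. */
	if (!(gd->flags & GD_FLG_RELOC))
		return 0;	/* still running before relocation */

	return gd->have_console && gd->baudrate != 0;
}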
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +void hang(void); diff --git a/src/arch/armv7/include/mmio_conf.h b/src/arch/armv7/include/mmio_conf.h new file mode 100644 index 0000000000..143c6bd613 --- /dev/null +++ b/src/arch/armv7/include/mmio_conf.h @@ -0,0 +1,6 @@ +#ifndef ARCH_MMIO_H +#define ARCH_MMIO_H 1 + +// Nut'n'Bitch + +#endif /* ARCH_MMIO_H */ diff --git a/src/arch/armv7/include/ptrace.h b/src/arch/armv7/include/ptrace.h new file mode 100644 index 0000000000..e981cadc47 --- /dev/null +++ b/src/arch/armv7/include/ptrace.h @@ -0,0 +1,30 @@ +#ifndef __ASM_ARM_PTRACE_H +#define __ASM_ARM_PTRACE_H + +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 + +#define PTRACE_SETOPTIONS 21 + +/* options set using PTRACE_SETOPTIONS */ +#define PTRACE_O_TRACESYSGOOD 0x00000001 + +#include + +#ifndef __ASSEMBLER__ +#define pc_pointer(v) \ + ((v) & ~PCMASK) + +#define instruction_pointer(regs) \ + (pc_pointer((regs)->ARM_pc)) + +extern void show_regs(struct pt_regs *); + +#define predicate(x) (x & 0xf0000000) +#define PREDICATE_ALWAYS 0xe0000000 + +#endif /* __ASSEMBLER__ */ + +#endif diff --git a/src/arch/armv7/include/smp/spinlock.h b/src/arch/armv7/include/smp/spinlock.h new file mode 100644 index 0000000000..1dc397c19c --- /dev/null +++ b/src/arch/armv7/include/smp/spinlock.h @@ -0,0 +1,52 @@ +#ifndef ARCH_SMP_SPINLOCK_H +#define ARCH_SMP_SPINLOCK_H + +/* FIXME: implement this for ARM */ +#error "implement this for ARM" +#if 0 +/* + * Your basic SMP spinlocks, allowing only a single CPU anywhere + */ + +typedef struct { + volatile unsigned int lock; +} spinlock_t; + + +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } +#define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED; + +#define barrier() __asm__ __volatile__("": : :"memory") +#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) != 0) +#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) + +static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock) +{ + unsigned long tmp; + __asm__ __volatile__ ( + "1: ldrex %0, [%1]\n" + " teq %0, #0\n" + " strexeq %0, %2, [%1]\n" + " teqeq %0, #0\n" + " bne 1b\n" + : "=&r" (tmp) + : "r" (&lock->lock), "r" (1) + : "cc" + ); + barrier(); +} + +static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock) +{ + __asm__ __volatile__( + " str %1, [%0]\n" + : + : "r" (&lock->lock), "r" (0) + : "cc" + ); +} + +#define cpu_relax() barrier() + +#endif +#endif /* ARCH_SMP_SPINLOCK_H */ diff --git a/src/arch/armv7/include/stdint.h b/src/arch/armv7/include/stdint.h new file mode 100644 index 0000000000..a8a023059a --- /dev/null +++ b/src/arch/armv7/include/stdint.h @@ -0,0 +1,81 @@ +#ifndef ARM_STDINT_H +#define ARM_STDINT_H + +#if defined(__GNUC__) +#define __HAVE_LONG_LONG__ 1 +#else +#define __HAVE_LONG_LONG__ 0 +#endif + +/* Exact integral types */ +typedef unsigned char uint8_t; +typedef signed char int8_t; + +typedef unsigned short uint16_t; +typedef signed short int16_t; + +typedef unsigned int uint32_t; +typedef signed int int32_t; + +#if __HAVE_LONG_LONG__ +typedef unsigned long long uint64_t; +typedef signed long long int64_t; +#endif + +/* Small types */ +typedef unsigned char uint_least8_t; +typedef signed char int_least8_t; + +typedef unsigned short uint_least16_t; +typedef signed short 
int_least16_t; + +typedef unsigned int uint_least32_t; +typedef signed int int_least32_t; + +#if __HAVE_LONG_LONG__ +typedef unsigned long long uint_least64_t; +typedef signed long long int_least64_t; +#endif + +/* Fast Types */ +typedef unsigned char uint_fast8_t; +typedef signed char int_fast8_t; + +typedef unsigned int uint_fast16_t; +typedef signed int int_fast16_t; + +typedef unsigned int uint_fast32_t; +typedef signed int int_fast32_t; + +#if __HAVE_LONG_LONG__ +typedef unsigned long long uint_fast64_t; +typedef signed long long int_fast64_t; +#endif + +/* Types for `void *' pointers. */ +typedef int intptr_t; +typedef unsigned int uintptr_t; + +/* Largest integral types */ +#if __HAVE_LONG_LONG__ +typedef long long int intmax_t; +typedef unsigned long long uintmax_t; +#else +typedef long int intmax_t; +typedef unsigned long int uintmax_t; +#endif + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +#if __HAVE_LONG_LONG__ +typedef uint64_t u64; +#endif +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; + + +#undef __HAVE_LONG_LONG__ + +#endif /* ARM_STDINT_H */ diff --git a/src/arch/armv7/include/system.h b/src/arch/armv7/include/system.h new file mode 100644 index 0000000000..3e14fdb0a5 --- /dev/null +++ b/src/arch/armv7/include/system.h @@ -0,0 +1,123 @@ +/* FIXME(dhendrix): This is split out from asm/system.h. */ +#ifndef SYSTEM_H_ +#define SYSTEM_H_ + +#define CPU_ARCH_UNKNOWN 0 +#define CPU_ARCH_ARMv3 1 +#define CPU_ARCH_ARMv4 2 +#define CPU_ARCH_ARMv4T 3 +#define CPU_ARCH_ARMv5 4 +#define CPU_ARCH_ARMv5T 5 +#define CPU_ARCH_ARMv5TE 6 +#define CPU_ARCH_ARMv5TEJ 7 +#define CPU_ARCH_ARMv6 8 +#define CPU_ARCH_ARMv7 9 + +/* + * CR1 bits (CP#15 CR1) + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_C (1 << 2) /* Dcache enable */ +#define CR_W (1 << 3) /* Write buffer enable */ +#define CR_P (1 << 4) /* 32-bit exception handler */ +#define CR_D (1 << 5) /* 32-bit data address range */ +#define CR_L (1 << 6) /* Implementation defined */ +#define CR_B (1 << 7) /* Big endian */ +#define CR_S (1 << 8) /* System MMU protection */ +#define CR_R (1 << 9) /* ROM MMU protection */ +#define CR_F (1 << 10) /* Implementation defined */ +#define CR_Z (1 << 11) /* Implementation defined */ +#define CR_I (1 << 12) /* Icache enable */ +#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ +#define CR_RR (1 << 14) /* Round Robin cache replacement */ +#define CR_L4 (1 << 15) /* LDR pc can set T bit */ +#define CR_DT (1 << 16) +#define CR_IT (1 << 18) +#define CR_ST (1 << 19) +#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ +#define CR_U (1 << 22) /* Unaligned access operation */ +#define CR_XP (1 << 23) /* Extended page tables */ +#define CR_VE (1 << 24) /* Vectored interrupts */ +#define CR_EE (1 << 25) /* Exception (Big) Endian */ +#define CR_TRE (1 << 28) /* TEX remap enable */ +#define CR_AFE (1 << 29) /* Access flag enable */ +#define CR_TE (1 << 30) /* Thumb exception enable */ + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences. Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. 
+ * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + +#define isb() __asm__ __volatile__ ("" : : : "memory") + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#define arch_align_stack(x) (x) + +#ifndef __ASSEMBLY__ +static inline unsigned int get_cr(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); + return val; +} + +static inline void set_cr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" + : : "r" (val) : "cc"); + isb(); +} + +/* options available for data cache on each page */ +enum dcache_option { + DCACHE_OFF, + DCACHE_WRITETHROUGH, + DCACHE_WRITEBACK, +}; + +/* Size of an MMU section */ +enum { + MMU_SECTION_SHIFT = 20, + MMU_SECTION_SIZE = 1 << MMU_SECTION_SHIFT, +}; + +/** + * Change the cache settings for a region. + * + * \param start start address of memory region to change + * \param size size of memory region to change + * \param option dcache option to select + */ +void mmu_set_region_dcache(unsigned long start, int size, + enum dcache_option option); + +/** + * Register an update to the page tables, and flush the TLB + * + * \param start start address of update in page table + * \param stop stop address of update in page table + */ +void mmu_page_table_flush(unsigned long start, unsigned long stop); + +void dram_bank_mmu_setup(unsigned long start, unsigned long size); + +void arm_init_before_mmu(void); + + /* + * FIXME: sdelay, sr32, and wait_on_value originally came from + * arch/arm/cpu/armv7/exynos5/setup.h in u-boot but do not seem + * specific to exynos5... + */ +void sdelay(unsigned long loops); +void sr32(void *addr, u32 start_bit, u32 num_bits, u32 value); +u32 wait_on_value(u32 read_bit_mask, u32 match_value, void *read_addr, + u32 bound); +#endif // __ASSEMBLY__ +#endif /* SYSTEM_H_ */ diff --git a/src/arch/armv7/include/utils.h b/src/arch/armv7/include/utils.h new file mode 100644 index 0000000000..828b86cb36 --- /dev/null +++ b/src/arch/armv7/include/utils.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, + * Aneesh V + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
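To illustrate the get_cr()/set_cr() accessors above together with the CR_* bit definitions earlier in system.h, a hedged sketch of turning on the instruction cache; real enable paths also invalidate the cache first, which is omitted here for brevity.

static void example_enable_icache(void)
{
	unsigned int cr = get_cr();

	/* Set the I bit in the control register; set_cr() ends with isb(). */
	if (!(cr & CR_I))
		set_cr(cr | CR_I);
}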
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#ifndef _UTILS_H_ +#define _UTILS_H_ + +static inline s32 log_2_n_round_up(u32 n) +{ + s32 log2n = -1; + u32 temp = n; + + while (temp) { + log2n++; + temp >>= 1; + } + + if (n & (n - 1)) + return log2n + 1; /* not power of 2 - round up */ + else + return log2n; /* power of 2 */ +} + +static inline s32 log_2_n_round_down(u32 n) +{ + s32 log2n = -1; + u32 temp = n; + + while (temp) { + log2n++; + temp >>= 1; + } + + return log2n; +} + +#endif diff --git a/src/arch/armv7/ldscript_failover.lb b/src/arch/armv7/ldscript_failover.lb new file mode 100644 index 0000000000..9a3b9aef8e --- /dev/null +++ b/src/arch/armv7/ldscript_failover.lb @@ -0,0 +1,74 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2008-2010 coresystems GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* We use ELF as output format. So that we can debug the code in some form. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm") +OUTPUT_ARCH(arm) + +MEMORY { + rom : ORIGIN = 0xffff0000, LENGTH = 64K +} + +TARGET(binary) +SECTIONS +{ + /* Symbol ap_sipi_vector must be aligned to 4kB to start AP CPUs + * with Startup IPI message without RAM. Align .rom to next 4 byte + * boundary anyway, so no pad byte appears between _rom and _start. + */ + .bogus ROMLOC_MIN : { + . = CONFIG_SIPI_VECTOR_IN_ROM ? ALIGN(4096) : ALIGN(4); + ROMLOC = .; + } >rom = 0xff + + /* This section might be better named .setup */ + .rom ROMLOC : { + _rom = .; + ap_sipi_vector = .; + *(.rom.text); + *(.rom.data); + *(.rom.data.*); + *(.rodata.*); + _erom = .; + } >rom = 0xff + + /* Allocation reserves extra 16 bytes here. Alignment requirements + * may cause the total size of a section to change when the start + * address gets applied. + */ + ROMLOC_MIN = 0xffffff00 - (_erom - _rom + 16) - + (CONFIG_SIPI_VECTOR_IN_ROM ? 4096 : 0); + + /* Post-check proper SIPI vector. */ + _bogus = ASSERT(!CONFIG_SIPI_VECTOR_IN_ROM || ((ap_sipi_vector & 0x0fff) == 0x0), + "Bad SIPI vector alignment"); + _bogus = ASSERT(!CONFIG_SIPI_VECTOR_IN_ROM || (ap_sipi_vector == CONFIG_AP_SIPI_VECTOR), + "Address mismatch on AP_SIPI_VECTOR"); + + /DISCARD/ : { + *(.comment) + *(.note) + *(.comment.*) + *(.note.*) + *(.iplt) + *(.rel.*) + *(.igot.*) + } +} diff --git a/src/arch/armv7/ldscript_fallback_cbfs.lb b/src/arch/armv7/ldscript_fallback_cbfs.lb new file mode 100644 index 0000000000..148f4e3184 --- /dev/null +++ b/src/arch/armv7/ldscript_fallback_cbfs.lb @@ -0,0 +1,51 @@ +/* + * This file is part of the coreboot project. + * + * Copyright (C) 2012 Google Inc. 
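The two log2 helpers above are later used by cache_v7.c to derive the bit position of the way field for set/way cache maintenance. A small illustrative use, with example values only (the helper name below is invented for this sketch):

/* Illustrative: how the round-up variant feeds the set/way encoding in
 * cache_v7.c. E.g. num_ways = 12 gives log_2_n_round_up(12) == 4, so
 * way_shift = 32 - 4 = 28. For powers of two the result is exact:
 * log_2_n_round_up(4) == log_2_n_round_down(4) == 2. */
static inline unsigned int example_way_shift(u32 num_ways)
{
        return 32 - log_2_n_round_up(num_ways);
}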
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* We use ELF as output format. So that we can debug the code in some form. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm") +OUTPUT_ARCH(arm) + +TARGET(binary) +SECTIONS +{ + . = CONFIG_ROMBASE; + + /* cut _start into last 64k*/ + _x = .; + . = (_x < (CONFIG_ROMBASE - 0x10000 + CONFIG_ROM_IMAGE_SIZE)) ? (CONFIG_ROMBASE - 0x10000 + CONFIG_ROM_IMAGE_SIZE) : _x; + + /* This section might be better named .setup */ + .rom . : { + _rom = .; + *(.text); + *(.rodata); + *(.rodata.*); + *(.data.*); + . = ALIGN(16); + _erom = .; + } + + /DISCARD/ : { + *(.comment) + *(.note) + *(.comment.*) + *(.note.*) + } + _bogus = ASSERT((SIZEOF(.bss) + SIZEOF(.data)) == 0, "Do not use global variables in romstage"); +} diff --git a/src/arch/armv7/lib/Makefile.inc b/src/arch/armv7/lib/Makefile.inc new file mode 100644 index 0000000000..bdf6a64ffb --- /dev/null +++ b/src/arch/armv7/lib/Makefile.inc @@ -0,0 +1,26 @@ +romstage-y += cache_v7.c +romstage-y += cache-cp15.c +romstage-y += div0.c +romstage-y += div64.S +romstage-y += hang_spl.c +romstage-y += romstage_console.c +romstage-y += syslib.c + +#ramstage-y += printk_init.c +#romstage-y += walkcbfs.S + +ramstage-y += c_start.S + +#ramstage-y += div.c +ramstage-y += div0.c +ramstage-y += div64.S +ramstage-y += hang_spl.c +#ramstage-y += interrupts.c +#ramstage-y += memcpy.S +#ramstage-y += memset.S +#ramstage-y += reset.c +ramstage-y += syslib.c + +#FIXME(dhendrix): should this be a config option? +romstage-y += eabi_compat.c +ramstage-y += eabi_compat.c diff --git a/src/arch/armv7/lib/Makefile.uboot b/src/arch/armv7/lib/Makefile.uboot new file mode 100644 index 0000000000..d33d08b14d --- /dev/null +++ b/src/arch/armv7/lib/Makefile.uboot @@ -0,0 +1,95 @@ +# +# (C) Copyright 2002-2006 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +include $(TOPDIR)/config.mk + +LIB = $(obj)lib$(ARCH).o +LIBGCC = $(obj)libgcc.o + +ifndef CONFIG_SPL_BUILD +GLSOBJS += _ashldi3.o +GLSOBJS += _ashrdi3.o +GLSOBJS += _divsi3.o +GLSOBJS += _lshrdi3.o +GLSOBJS += _modsi3.o +GLSOBJS += _udivsi3.o +GLSOBJS += _umodsi3.o +ifdef CONFIG_CHROMEOS +GLSOBJS += _uldivmod.o +endif + +GLCOBJS += div0.o + +ifeq ($(CONFIG_SYS_GENERIC_BOARD),) +COBJS-y += board.o +endif +COBJS-y += bootm.o +COBJS-y += cache.o +COBJS-y += cache-cp15.o +COBJS-$(CONFIG_SYS_L2_PL310) += cache-pl310.o +SOBJS-$(CONFIG_USE_ARCH_MEMSET) += memset.o +SOBJS-$(CONFIG_USE_ARCH_MEMCPY) += memcpy.o +else # CONFIG_SPL_BUILD +ifeq ($(CONFIG_SYS_GENERIC_BOARD),) +COBJS-y += hang_spl.o +endif +endif # CONFIG_SPL_BUILD + +COBJS-y += interrupts.o +COBJS-y += reset.o + +SRCS := $(GLSOBJS:.o=.S) $(GLCOBJS:.o=.c) \ + $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c) +OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y)) +LGOBJS := $(addprefix $(obj),$(GLSOBJS)) \ + $(addprefix $(obj),$(GLCOBJS)) + +# Always build libarm.o +TARGETS := $(LIB) + +# Build private libgcc only when asked for +ifdef USE_PRIVATE_LIBGCC +TARGETS += $(LIBGCC) +endif + +# For EABI conformant tool chains, provide eabi_compat() +ifneq (,$(findstring -mabi=aapcs-linux,$(PLATFORM_CPPFLAGS))) +TARGETS += $(obj)eabi_compat.o +endif + +all: $(TARGETS) + +$(LIB): $(obj).depend $(OBJS) + $(call cmd_link_o_target, $(OBJS)) + +$(LIBGCC): $(obj).depend $(LGOBJS) + $(call cmd_link_o_target, $(LGOBJS)) + +######################################################################### + +# defines $(obj).depend target +include $(SRCTREE)/rules.mk + +sinclude $(obj).depend + +######################################################################### diff --git a/src/arch/armv7/lib/_divsi3.S b/src/arch/armv7/lib/_divsi3.S new file mode 100644 index 0000000000..cfbadb2ab9 --- /dev/null +++ b/src/arch/armv7/lib/_divsi3.S @@ -0,0 +1,142 @@ + +.macro ARM_DIV_BODY dividend, divisor, result, curbit + +#if __LINUX_ARM_ARCH__ >= 5 + + clz \curbit, \divisor + clz \result, \dividend + sub \result, \curbit, \result + mov \curbit, #1 + mov \divisor, \divisor, lsl \result + mov \curbit, \curbit, lsl \result + mov \result, #0 + +#else + + @ Initially shift the divisor left 3 bits if possible, + @ set curbit accordingly. This allows for curbit to be located + @ at the left end of each 4 bit nibbles in the division loop + @ to save one loop in most cases. + tst \divisor, #0xe0000000 + moveq \divisor, \divisor, lsl #3 + moveq \curbit, #8 + movne \curbit, #1 + + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. +1: cmp \divisor, #0x10000000 + cmplo \divisor, \dividend + movlo \divisor, \divisor, lsl #4 + movlo \curbit, \curbit, lsl #4 + blo 1b + + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. 
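The ARM_DIV_BODY macro above implements classic shift-and-subtract (restoring) division, unrolled four quotient bits per iteration. The following C model is purely illustrative of the same algorithm, one bit at a time; it is not part of the patch:

/* Illustrative C model of the shift-and-subtract loop in ARM_DIV_BODY. */
static unsigned int example_soft_udiv(unsigned int dividend, unsigned int divisor)
{
        unsigned int result = 0, curbit = 1;

        if (divisor == 0)
                return 0;       /* the real code branches to __div0 instead */

        /* Shift the divisor up until it exceeds the dividend (or its top
         * bit is set), tracking the matching quotient bit. */
        while (divisor < 0x80000000u && divisor < dividend) {
                divisor <<= 1;
                curbit <<= 1;
        }
        /* Subtract whenever possible, accumulating quotient bits. */
        while (curbit) {
                if (dividend >= divisor) {
                        dividend -= divisor;
                        result |= curbit;
                }
                divisor >>= 1;
                curbit >>= 1;
        }
        return result;          /* the remainder is what is left in 'dividend' */
}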
+1: cmp \divisor, #0x80000000 + cmplo \divisor, \dividend + movlo \divisor, \divisor, lsl #1 + movlo \curbit, \curbit, lsl #1 + blo 1b + + mov \result, #0 + +#endif + + @ Division loop +1: cmp \dividend, \divisor + subhs \dividend, \dividend, \divisor + orrhs \result, \result, \curbit + cmp \dividend, \divisor, lsr #1 + subhs \dividend, \dividend, \divisor, lsr #1 + orrhs \result, \result, \curbit, lsr #1 + cmp \dividend, \divisor, lsr #2 + subhs \dividend, \dividend, \divisor, lsr #2 + orrhs \result, \result, \curbit, lsr #2 + cmp \dividend, \divisor, lsr #3 + subhs \dividend, \dividend, \divisor, lsr #3 + orrhs \result, \result, \curbit, lsr #3 + cmp \dividend, #0 @ Early termination? + movnes \curbit, \curbit, lsr #4 @ No, any more bits to do? + movne \divisor, \divisor, lsr #4 + bne 1b + +.endm + +.macro ARM_DIV2_ORDER divisor, order + +#if __LINUX_ARM_ARCH__ >= 5 + + clz \order, \divisor + rsb \order, \order, #31 + +#else + + cmp \divisor, #(1 << 16) + movhs \divisor, \divisor, lsr #16 + movhs \order, #16 + movlo \order, #0 + + cmp \divisor, #(1 << 8) + movhs \divisor, \divisor, lsr #8 + addhs \order, \order, #8 + + cmp \divisor, #(1 << 4) + movhs \divisor, \divisor, lsr #4 + addhs \order, \order, #4 + + cmp \divisor, #(1 << 2) + addhi \order, \order, #3 + addls \order, \order, \divisor, lsr #1 + +#endif + +.endm + + .align 5 +.globl __divsi3 +.globl __aeabi_idiv +__divsi3: +__aeabi_idiv: + cmp r1, #0 + eor ip, r0, r1 @ save the sign of the result. + beq Ldiv0 + rsbmi r1, r1, #0 @ loops below use unsigned. + subs r2, r1, #1 @ division by 1 or -1 ? + beq 10f + movs r3, r0 + rsbmi r3, r0, #0 @ positive dividend value + cmp r3, r1 + bls 11f + tst r1, r2 @ divisor is power of 2 ? + beq 12f + + ARM_DIV_BODY r3, r1, r0, r2 + + cmp ip, #0 + rsbmi r0, r0, #0 + mov pc, lr + +10: teq ip, r0 @ same sign ? + rsbmi r0, r0, #0 + mov pc, lr + +11: movlo r0, #0 + moveq r0, ip, asr #31 + orreq r0, r0, #1 + mov pc, lr + +12: ARM_DIV2_ORDER r1, r2 + + cmp ip, #0 + mov r0, r3, lsr r2 + rsbmi r0, r0, #0 + mov pc, lr + +Ldiv0: + + str lr, [sp, #-4]! + bl __div0 + mov r0, #0 @ About as wrong as it could be. + ldr pc, [sp], #4 diff --git a/src/arch/armv7/lib/_udivsi3.S b/src/arch/armv7/lib/_udivsi3.S new file mode 100644 index 0000000000..1309802610 --- /dev/null +++ b/src/arch/armv7/lib/_udivsi3.S @@ -0,0 +1,93 @@ +/* # 1 "libgcc1.S" */ +@ libgcc1 routines for ARM cpu. +@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) +dividend .req r0 +divisor .req r1 +result .req r2 +curbit .req r3 +/* ip .req r12 */ +/* sp .req r13 */ +/* lr .req r14 */ +/* pc .req r15 */ + .text + .globl __udivsi3 + .type __udivsi3 ,function + .globl __aeabi_uidiv + .type __aeabi_uidiv ,function + .align 0 + __udivsi3: + __aeabi_uidiv: + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + mov result, #0 + cmp dividend, divisor + bcc Lgot_result +Loop1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc Loop1 +Lbignum: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. 
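In the __divsi3/__aeabi_idiv entry sequence above (in _divsi3.S), the sign of the result is recorded by XOR-ing the operands before the unsigned core loop runs, and the quotient is negated afterwards if the signs differed. An illustrative C rendering of that sign handling (not part of the patch; the function name is invented):

/* Illustrative model of the sign handling in __divsi3 / __aeabi_idiv. */
static int example_sdiv(int a, int b)
{
        /* b == 0 is handled by the Ldiv0 path in the assembly */
        unsigned int ua = (a < 0) ? -(unsigned int)a : (unsigned int)a;
        unsigned int ub = (b < 0) ? -(unsigned int)b : (unsigned int)b;
        unsigned int uq = ua / ub;      /* unsigned core, as in ARM_DIV_BODY */
        int negate = (a ^ b) < 0;       /* the 'eor ip, r0, r1' sign trick */

        return negate ? -(int)uq : (int)uq;
}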
+ cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc Lbignum +Loop3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + subcs dividend, dividend, divisor + orrcs result, result, curbit + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs result, result, curbit, lsr #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs result, result, curbit, lsr #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs result, result, curbit, lsr #3 + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne Loop3 +Lgot_result: + mov r0, result + mov pc, lr +Ldiv0: + str lr, [sp, #-4]! + bl __div0 (PLT) + mov r0, #0 @ about as wrong as it could be + ldmia sp!, {pc} + .size __udivsi3 , . - __udivsi3 + +.globl __aeabi_uidivmod +__aeabi_uidivmod: + + stmfd sp!, {r0, r1, ip, lr} + bl __aeabi_uidiv + ldmfd sp!, {r1, r2, ip, lr} + mul r3, r0, r2 + sub r1, r1, r3 + mov pc, lr + +.globl __aeabi_idivmod +__aeabi_idivmod: + + stmfd sp!, {r0, r1, ip, lr} + bl __aeabi_idiv + ldmfd sp!, {r1, r2, ip, lr} + mul r3, r0, r2 + sub r1, r1, r3 + mov pc, lr diff --git a/src/arch/armv7/lib/_uldivmod.S b/src/arch/armv7/lib/_uldivmod.S new file mode 100644 index 0000000000..65e4fa8441 --- /dev/null +++ b/src/arch/armv7/lib/_uldivmod.S @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2011 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + */ + +/* + * A, Q = r0 + (r1 << 32) + * B, R = r2 + (r3 << 32) + * A / B = Q ... 
R + */ + + .text + .global __aeabi_uldivmod + .type __aeabi_uldivmod, function + .align 0 + +A_0 .req r0 +A_1 .req r1 +B_0 .req r2 +B_1 .req r3 +C_0 .req r4 +C_1 .req r5 +D_0 .req r6 +D_1 .req r7 + +Q_0 .req r0 +Q_1 .req r1 +R_0 .req r2 +R_1 .req r3 + +__aeabi_uldivmod: + stmfd sp!, {r4, r5, r6, r7, lr} + @ Test if B == 0 + orrs ip, B_0, B_1 @ Z set -> B == 0 + beq L_div_by_0 + @ Test if B is power of 2: (B & (B - 1)) == 0 + subs C_0, B_0, #1 + sbc C_1, B_1, #0 + tst C_0, B_0 + tsteq B_1, C_1 + beq L_pow2 + @ Test if A_1 == B_1 == 0 + orrs ip, A_1, B_1 + beq L_div_32_32 + +L_div_64_64: + mov C_0, #1 + mov C_1, #0 + @ D_0 = clz A + teq A_1, #0 + clz D_0, A_1 + clzeq ip, A_0 + addeq D_0, D_0, ip + @ D_1 = clz B + teq B_1, #0 + clz D_1, B_1 + clzeq ip, B_0 + addeq D_1, D_1, ip + @ if clz B - clz A > 0 + subs D_0, D_1, D_0 + bls L_done_shift + @ B <<= (clz B - clz A) + subs D_1, D_0, #32 + rsb ip, D_0, #32 + movmi B_1, B_1, lsl D_0 + orrmi B_1, B_1, B_0, lsr ip + movpl B_1, B_0, lsl D_1 + mov B_0, B_0, lsl D_0 + @ C = 1 << (clz B - clz A) + movmi C_1, C_1, lsl D_0 + orrmi C_1, C_1, C_0, lsr ip + movpl C_1, C_0, lsl D_1 + mov C_0, C_0, lsl D_0 +L_done_shift: + mov D_0, #0 + mov D_1, #0 + @ C: current bit; D: result +L_subtract: + @ if A >= B + cmp A_1, B_1 + cmpeq A_0, B_0 + bcc L_update + @ A -= B + subs A_0, A_0, B_0 + sbc A_1, A_1, B_1 + @ D |= C + orr D_0, D_0, C_0 + orr D_1, D_1, C_1 +L_update: + @ if A == 0: break + orrs ip, A_1, A_0 + beq L_exit + @ C >>= 1 + movs C_1, C_1, lsr #1 + movs C_0, C_0, rrx + @ if C == 0: break + orrs ip, C_1, C_0 + beq L_exit + @ B >>= 1 + movs B_1, B_1, lsr #1 + mov B_0, B_0, rrx + b L_subtract +L_exit: + @ Note: A, B & Q, R are aliases + mov R_0, A_0 + mov R_1, A_1 + mov Q_0, D_0 + mov Q_1, D_1 + ldmfd sp!, {r4, r5, r6, r7, pc} + +L_div_32_32: + @ Note: A_0 & r0 are aliases + @ Q_1 r1 + mov r1, B_0 + bl __aeabi_uidivmod + mov R_0, r1 + mov R_1, #0 + mov Q_1, #0 + ldmfd sp!, {r4, r5, r6, r7, pc} + +L_pow2: + @ Note: A, B and Q, R are aliases + @ R = A & (B - 1) + and C_0, A_0, C_0 + and C_1, A_1, C_1 + @ Q = A >> log2(B) + @ Note: B must not be 0 here! + clz D_0, B_0 + add D_1, D_0, #1 + rsbs D_0, D_0, #31 + bpl L_1 + clz D_0, B_1 + rsb D_0, D_0, #31 + mov A_0, A_1, lsr D_0 + add D_0, D_0, #32 +L_1: + movpl A_0, A_0, lsr D_0 + orrpl A_0, A_0, A_1, lsl D_1 + mov A_1, A_1, lsr D_0 + @ Mov back C to R + mov R_0, C_0 + mov R_1, C_1 + ldmfd sp!, {r4, r5, r6, r7, pc} + +L_div_by_0: + bl __div0 + @ As wrong as it could be + mov Q_0, #0 + mov Q_1, #0 + mov R_0, #0 + mov R_1, #0 + ldmfd sp!, {r4, r5, r6, r7, pc} diff --git a/src/arch/armv7/lib/bootm.c b/src/arch/armv7/lib/bootm.c new file mode 100644 index 0000000000..afe1b70814 --- /dev/null +++ b/src/arch/armv7/lib/bootm.c @@ -0,0 +1,405 @@ +/* + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Marius Groeger + * + * Copyright (C) 2001 Erik Mouw (J.A.K.Mouw@its.tudelft.nl) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
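The L_pow2 path of __aeabi_uldivmod above relies on two standard identities for a power-of-two divisor. In C form (illustrative only; the GCC builtin stands in for the clz sequence the assembly uses, and the function name is invented):

/* Illustrative: the power-of-two fast path of __aeabi_uldivmod. */
static void example_pow2_divmod(u64 a, u64 b, u64 *q, u64 *r)
{
        /* Precondition checked at the top of __aeabi_uldivmod:
         * b != 0 and (b & (b - 1)) == 0, i.e. b is a power of two. */
        *r = a & (b - 1);               /* remainder: the low log2(b) bits */
        *q = a >> __builtin_ctzll(b);   /* quotient: shift right by log2(b) */
}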
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +DECLARE_GLOBAL_DATA_PTR; + +#if defined (CONFIG_SETUP_MEMORY_TAGS) || \ + defined (CONFIG_CMDLINE_TAG) || \ + defined (CONFIG_INITRD_TAG) || \ + defined (CONFIG_SERIAL_TAG) || \ + defined (CONFIG_REVISION_TAG) +static void setup_start_tag (bd_t *bd); + +# ifdef CONFIG_SETUP_MEMORY_TAGS +static void setup_memory_tags (bd_t *bd); +# endif +static void setup_commandline_tag (bd_t *bd, char *commandline); + +# ifdef CONFIG_INITRD_TAG +static void setup_initrd_tag (bd_t *bd, ulong initrd_start, + ulong initrd_end); +# endif +static void setup_end_tag (bd_t *bd); + +# if defined (CONFIG_VFD) || defined (CONFIG_LCD) +# if !defined(CONFIG_VIDEOFLB_ATAG_NOT_SUPPORTED) +static void setup_videolfb_tag (gd_t *gd); +# endif +# endif + +static struct tag *params; +#endif /* CONFIG_SETUP_MEMORY_TAGS || CONFIG_CMDLINE_TAG || CONFIG_INITRD_TAG */ + +static ulong get_sp(void); +#if defined(CONFIG_OF_LIBFDT) && !defined(CONFIG_OF_NO_KERNEL) +static int bootm_linux_fdt(int machid, bootm_headers_t *images); +#endif + +void arch_lmb_reserve(struct lmb *lmb) +{ + ulong sp; + + /* + * Booting a (Linux) kernel image + * + * Allocate space for command line and board info - the + * address should be as high as possible within the reach of + * the kernel (see CONFIG_SYS_BOOTMAPSZ settings), but in unused + * memory, which means far enough below the current stack + * pointer. + */ + sp = get_sp(); + debug("## Current stack ends at 0x%08lx ", sp); + + /* adjust sp by 1K to be safe */ + sp -= 1024; + lmb_reserve(lmb, sp, + gd->bd->bi_dram[0].start + gd->bd->bi_dram[0].size - sp); +} + +static void announce_and_cleanup(void) +{ + printf("\nStarting kernel ...\n\n"); + bootstage_mark_name(BOOTSTAGE_ID_BOOTM_HANDOFF, "start_kernel"); + +#ifdef CONFIG_USB_DEVICE + { + extern void udc_disconnect(void); + udc_disconnect(); + } +#endif + cleanup_before_linux(); +} + +int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images) +{ + bd_t *bd = gd->bd; + char *s; + int machid = bd->bi_arch_number; + void (*kernel_entry)(int zero, int arch, uint params); + +#ifdef CONFIG_CMDLINE_TAG + char *commandline = getenv ("bootargs"); +#endif + + if ((flag != 0) && (flag != BOOTM_STATE_OS_GO)) + return 1; + + s = getenv ("machid"); + if (s) { + machid = simple_strtoul (s, NULL, 16); + printf ("Using machid 0x%x from environment\n", machid); + } + + bootstage_mark(BOOTSTAGE_ID_RUN_OS); + +#if defined(CONFIG_OF_LIBFDT) && !defined(CONFIG_OF_NO_KERNEL) + if (images->ft_len) + return bootm_linux_fdt(machid, images); +#endif + + kernel_entry = (void (*)(int, int, uint))images->ep; + + debug ("## Transferring control to Linux (at address %08lx) ...\n", + (ulong) kernel_entry); + +#if defined (CONFIG_SETUP_MEMORY_TAGS) || \ + defined (CONFIG_CMDLINE_TAG) || \ + defined (CONFIG_INITRD_TAG) || \ + defined (CONFIG_SERIAL_TAG) || \ + defined (CONFIG_REVISION_TAG) + setup_start_tag (bd); +#ifdef CONFIG_SERIAL_TAG + setup_serial_tag (¶ms); +#endif +#ifdef CONFIG_REVISION_TAG + setup_revision_tag (¶ms); +#endif +#ifdef CONFIG_SETUP_MEMORY_TAGS + setup_memory_tags (bd); +#endif +#ifdef CONFIG_CMDLINE_TAG + setup_commandline_tag (bd, commandline); +#endif +#ifdef CONFIG_INITRD_TAG + if (images->rd_start && images->rd_end) + 
setup_initrd_tag (bd, images->rd_start, images->rd_end); +#endif +#if defined (CONFIG_VFD) || defined (CONFIG_LCD) +#if !defined(CONFIG_VIDEOFLB_ATAG_NOT_SUPPORTED) + setup_videolfb_tag ((gd_t *) gd); +#endif +#endif + setup_end_tag (bd); +#endif + + announce_and_cleanup(); + + kernel_entry(0, machid, bd->bi_boot_params); + /* does not return */ + + return 1; +} + +#if defined(CONFIG_OF_LIBFDT) && !defined(CONFIG_OF_NO_KERNEL) +static int fixup_memory_node(void *blob) +{ + bd_t *bd = gd->bd; + int bank; + u64 start[CONFIG_NR_DRAM_BANKS]; + u64 size[CONFIG_NR_DRAM_BANKS]; + + for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) { + start[bank] = bd->bi_dram[bank].start; + size[bank] = bd->bi_dram[bank].size; + } + + return fdt_fixup_memory_banks(blob, start, size, CONFIG_NR_DRAM_BANKS); +} + +static int bootm_linux_fdt(int machid, bootm_headers_t *images) +{ + ulong rd_len; + void (*kernel_entry)(int zero, int dt_machid, void *dtblob); + ulong of_size = images->ft_len; + char **of_flat_tree = &images->ft_addr; + ulong *initrd_start = &images->initrd_start; + ulong *initrd_end = &images->initrd_end; + struct lmb *lmb = &images->lmb; + int ret; + + kernel_entry = (void (*)(int, int, void *))images->ep; + + boot_fdt_add_mem_rsv_regions(lmb, *of_flat_tree); + + rd_len = images->rd_end - images->rd_start; + ret = boot_ramdisk_high(lmb, images->rd_start, rd_len, + initrd_start, initrd_end); + if (ret) + return ret; + +#ifdef CONFIG_OF_BOARD_SETUP + /* Try to reserve 1024 bytes for board fixups */ + if (!fdt_open_into(*of_flat_tree, *of_flat_tree, of_size + 1024)) + of_size += 1024; + /* Call the board-specific fixup routine */ + ft_board_setup(*of_flat_tree, gd->bd); +#endif +#ifdef CONFIG_OF_UPDATE_FDT_BEFORE_BOOT + /* this must be earlier than boot_relocate_fdt */ + ret = fit_update_fdt_before_boot(*of_flat_tree, &of_size); + if (ret) + return ret; +#endif + + ret = boot_relocate_fdt(lmb, of_flat_tree, &of_size); + if (ret) + return ret; + + debug("## Transferring control to Linux (at address %08lx) ...\n", + (ulong) kernel_entry); + + fdt_chosen(*of_flat_tree, 1); + + fixup_memory_node(*of_flat_tree); + + fdt_fixup_ethernet(*of_flat_tree); + + fdt_initrd(*of_flat_tree, *initrd_start, *initrd_end, 1); + + announce_and_cleanup(); + + kernel_entry(0, machid, *of_flat_tree); + /* does not return */ + + return 1; +} +#endif + +#if defined (CONFIG_SETUP_MEMORY_TAGS) || \ + defined (CONFIG_CMDLINE_TAG) || \ + defined (CONFIG_INITRD_TAG) || \ + defined (CONFIG_SERIAL_TAG) || \ + defined (CONFIG_REVISION_TAG) +static void setup_start_tag (bd_t *bd) +{ + params = (struct tag *) bd->bi_boot_params; + + params->hdr.tag = ATAG_CORE; + params->hdr.size = tag_size (tag_core); + +#if defined (ATAG_CORE_FLAGS) && \ + defined (ATAG_PAGE_SIZE) && \ + defined (ATAG_CORE_RDEV) + params->u.core.flags = ATAG_CORE_FLAGS; + params->u.core.pagesize = ATAG_PAGE_SIZE; + params->u.core.rootdev = ATAG_CORE_RDEV; +#else + params->u.core.flags = 0; + params->u.core.pagesize = 0; + params->u.core.rootdev = 0; +#endif + + params = tag_next (params); +} + + +#ifdef CONFIG_SETUP_MEMORY_TAGS +static void setup_memory_tags (bd_t *bd) +{ + int i; + + for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { + params->hdr.tag = ATAG_MEM; + params->hdr.size = tag_size (tag_mem32); + + params->u.mem.start = bd->bi_dram[i].start; + params->u.mem.size = bd->bi_dram[i].size; + + params = tag_next (params); + } +} +#endif /* CONFIG_SETUP_MEMORY_TAGS */ + + +static void setup_commandline_tag (bd_t *bd, char *commandline) +{ + char *p; + + if 
(!commandline) + return; + + /* eat leading white space */ + for (p = commandline; *p == ' '; p++); + + /* skip non-existent command lines so the kernel will still + * use its default command line. + */ + if (*p == '\0') + return; + + params->hdr.tag = ATAG_CMDLINE; + params->hdr.size = + (sizeof (struct tag_header) + strlen (p) + 1 + 4) >> 2; + + strcpy (params->u.cmdline.cmdline, p); + + params = tag_next (params); +} + + +#ifdef CONFIG_INITRD_TAG +static void setup_initrd_tag (bd_t *bd, ulong initrd_start, ulong initrd_end) +{ + /* an ATAG_INITRD node tells the kernel where the compressed + * ramdisk can be found. ATAG_RDIMG is a better name, actually. + */ + params->hdr.tag = ATAG_INITRD2; + params->hdr.size = tag_size (tag_initrd); + + params->u.initrd.start = initrd_start; + params->u.initrd.size = initrd_end - initrd_start; + + params = tag_next (params); +} +#endif /* CONFIG_INITRD_TAG */ + +#if defined (CONFIG_VFD) || defined (CONFIG_LCD) +#if !defined(CONFIG_VIDEOFLB_ATAG_NOT_SUPPORTED) +extern ulong calc_fbsize (void); +static void setup_videolfb_tag (gd_t *gd) +{ + /* An ATAG_VIDEOLFB node tells the kernel where and how large + * the framebuffer for video was allocated (among other things). + * Note that a _physical_ address is passed ! + * + * We only use it to pass the address and size, the other entries + * in the tag_videolfb are not of interest. + */ + params->hdr.tag = ATAG_VIDEOLFB; + params->hdr.size = tag_size (tag_videolfb); + + params->u.videolfb.lfb_base = (u32) gd->fb_base; + /* Fb size is calculated according to parameters for our panel + */ + params->u.videolfb.lfb_size = calc_fbsize(); + + params = tag_next (params); +} +#endif /* CONFIG_VIDEOFLB_ATAG_NOT_SUPPORTED */ +#endif /* CONFIG_VFD || CONFIG_LCD */ + +#ifdef CONFIG_SERIAL_TAG +void setup_serial_tag (struct tag **tmp) +{ + struct tag *params = *tmp; + struct tag_serialnr serialnr; + void get_board_serial(struct tag_serialnr *serialnr); + + get_board_serial(&serialnr); + params->hdr.tag = ATAG_SERIAL; + params->hdr.size = tag_size (tag_serialnr); + params->u.serialnr.low = serialnr.low; + params->u.serialnr.high= serialnr.high; + params = tag_next (params); + *tmp = params; +} +#endif + +#ifdef CONFIG_REVISION_TAG +void setup_revision_tag(struct tag **in_params) +{ + u32 rev = 0; + u32 get_board_rev(void); + + rev = get_board_rev(); + params->hdr.tag = ATAG_REVISION; + params->hdr.size = tag_size (tag_revision); + params->u.revision.rev = rev; + params = tag_next (params); +} +#endif /* CONFIG_REVISION_TAG */ + +static void setup_end_tag (bd_t *bd) +{ + params->hdr.tag = ATAG_NONE; + params->hdr.size = 0; +} +#endif /* CONFIG_SETUP_MEMORY_TAGS || CONFIG_CMDLINE_TAG || CONFIG_INITRD_TAG */ + +static ulong get_sp(void) +{ + ulong ret; + + asm("mov %0, sp" : "=r"(ret) : ); + return ret; +} diff --git a/src/arch/armv7/lib/c_start.S b/src/arch/armv7/lib/c_start.S new file mode 100644 index 0000000000..b6e746299d --- /dev/null +++ b/src/arch/armv7/lib/c_start.S @@ -0,0 +1,9 @@ +.section ".text" +.globl _start +_start: + bl _hardwaremain + +_hardwaremain: .word hardwaremain +@ .word hardwaremain + + diff --git a/src/arch/armv7/lib/cache-cp15.c b/src/arch/armv7/lib/cache-cp15.c new file mode 100644 index 0000000000..1786725148 --- /dev/null +++ b/src/arch/armv7/lib/cache-cp15.c @@ -0,0 +1,304 @@ +/* + * (C) Copyright 2002 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * See file CREDITS for list of people who contributed to this + * project. 
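The setup_*_tag() helpers above build the legacy ATAG list the kernel parses when it is not booted with a device tree: setup_start_tag() plants ATAG_CORE at bd->bi_boot_params, each helper appends one node via tag_next(), and setup_end_tag() terminates the list. A minimal illustrative walk of such a list, using the same field names as this file (not part of the patch):

/* Illustrative: walking the ATAG list built by the helpers above. */
static void example_walk_atags(struct tag *t)
{
        /* setup_end_tag() terminates the list with ATAG_NONE and size 0 */
        for (; t->hdr.size; t = tag_next(t)) {
                if (t->hdr.tag == ATAG_MEM)
                        printf("mem bank: start %08x size %08x\n",
                               t->u.mem.start, t->u.mem.size);
        }
}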
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* FIXME(dhendrix): clean-up weak symbols if it looks unlikely we'll + want to override them with anything other than what's in cache_v7. */ +#include +#include +#include +#include + +#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) + +DECLARE_GLOBAL_DATA_PTR; + +#if 0 +void __arm_init_before_mmu(void) +{ +} +void arm_init_before_mmu(void) + __attribute__((weak, alias("__arm_init_before_mmu"))); +#endif + +static void cp_delay (void) +{ + volatile int i; + + /* copro seems to need some delay between reading and writing */ + for (i = 0; i < 100; i++) + nop(); + asm volatile("" : : : "memory"); +} + +static void set_section_dcache(int section, enum dcache_option option) +{ + u32 value = section << MMU_SECTION_SHIFT | (3 << 10); +// u32 *page_table = (u32 *)gd->tlb_addr; + u32 *page_table; + unsigned int tlb_addr; + unsigned int tlb_size = 4096 * 4; + + /* + * FIXME(dhendrix): This calculation is from arch/arm/lib/board.c + * in u-boot. We may need to subtract more due to logging. + */ + tlb_addr = (CONFIG_SYS_SDRAM_BASE + (CONFIG_DRAM_SIZE_MB << 20UL)); + tlb_addr -= tlb_size; + /* round down to next 64KB limit */ + tlb_addr &= ~(0x10000 - 1); + page_table = (u32 *)tlb_addr; + + switch (option) { + case DCACHE_WRITETHROUGH: + value |= 0x1a; + break; + + case DCACHE_WRITEBACK: + value |= 0x1e; + break; + + case DCACHE_OFF: + value |= 0x12; + break; + } + + page_table[section] = value; +} + +#if 0 +void __mmu_page_table_flush(unsigned long start, unsigned long stop) +{ + debug("%s: Warning: not implemented\n", __func__); +} +#endif + +#if 0 +void mmu_page_table_flush(unsigned long start, unsigned long stop) + __attribute__((weak, alias("__mmu_page_table_flush"))); +#endif + +void mmu_set_region_dcache(unsigned long start, int size, enum dcache_option option) +{ + u32 *page_table = (u32 *)gd->tlb_addr; + u32 upto, end; + + end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT; + start = start >> MMU_SECTION_SHIFT; + debug("mmu_set_region_dcache start=%x, size=%x, option=%d\n", + start, size, option); + for (upto = start; upto < end; upto++) + set_section_dcache(upto, option); + mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]); +} + +#if 0 +static inline void dram_bank_mmu_setup(int bank) +{ +// bd_t *bd = gd->bd; + int i; + + debug("%s: bank: %d\n", __func__, bank); + for (i = bd->bi_dram[bank].start >> 20; + i < (bd->bi_dram[bank].start + bd->bi_dram[bank].size) >> 20; + i++) { +#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH) + set_section_dcache(i, DCACHE_WRITETHROUGH); +#else + set_section_dcache(i, DCACHE_WRITEBACK); +#endif + } +} +#endif + +/* FIXME(dhendrix): modified to take arguments from the caller (mainboard's + romstage.c) so it doesn't rely on global data struct */ +/** + * dram_bank_mmu_set - set up the 
data cache policy for a given dram bank + * + * @start: virtual address start of bank + * @size: size of bank (in bytes) + */ +inline void dram_bank_mmu_setup(unsigned long start, unsigned long size) +{ + int i; + + debug("%s: bank: %d\n", __func__, bank); + for (i = start >> 20; i < (start + size) >> 20; i++) { +#if defined(CONFIG_ARM_DCACHE_POLICY_WRITEBACK) + set_section_dcache(i, DCACHE_WRITEBACK); +#elif defined(CONFIG_ARM_DCACHE_POLICY_WRITETHROUGH) + set_section_dcache(i, DCACHE_WRITETHROUGH); +#else +#error "Must define dcache policy." +#endif + } +} + +/* to activate the MMU we need to set up virtual memory: use 1M areas */ +static inline void mmu_setup(void) +{ + int i; + u32 reg; + + arm_init_before_mmu(); + /* Set up an identity-mapping for all 4GB, rw for everyone */ + for (i = 0; i < 4096; i++) + set_section_dcache(i, DCACHE_OFF); + + /* FIXME(dhendrix): u-boot's global data struct was used here... */ +#if 0 + for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { + dram_bank_mmu_setup(i); + } +#endif +#if 0 + /* comes from board's romstage.c, since we need to know which + ranges to setup */ + mainboard_setup_mmu(); +#endif + dram_bank_mmu_setup(CONFIG_SYS_SDRAM_BASE, CONFIG_DRAM_SIZE_MB << 20); + + /* Copy the page table address to cp15 */ + asm volatile("mcr p15, 0, %0, c2, c0, 0" + : : "r" (gd->tlb_addr) : "memory"); + /* Set the access control to all-supervisor */ + asm volatile("mcr p15, 0, %0, c3, c0, 0" + : : "r" (~0)); + /* and enable the mmu */ + reg = get_cr(); /* get control reg. */ + cp_delay(); + set_cr(reg | CR_M); +} + +static int mmu_enabled(void) +{ + return get_cr() & CR_M; +} + +/* cache_bit must be either CR_I or CR_C */ +static void cache_enable(uint32_t cache_bit) +{ + uint32_t reg; + + /* The data cache is not active unless the mmu is enabled too */ + if ((cache_bit == CR_C) && !mmu_enabled()) + mmu_setup(); + reg = get_cr(); /* get control reg. */ + cp_delay(); + set_cr(reg | cache_bit); +} + +/* + * Big hack warning! + * + * Devs like to compile with -O0 to get a nice debugging illusion. But this + * function does not survive that since -O0 causes the compiler to read the + * PC back from the stack after the dcache flush. Might it be possible to fix + * this by flushing the write buffer? 
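set_section_dcache() and mmu_setup() above build a flat first-level translation table out of 4096 one-megabyte "section" descriptors, identity-mapped with supervisor read/write access. A compact illustrative model of that table fill, reusing the attribute values this file writes (0x12 uncached, 0x1a write-through, 0x1e write-back); the function name is invented for the sketch:

/* Illustrative: a flat 4 GB identity map built from 1 MB sections. */
static void example_identity_map(u32 *ttb)      /* 16 KB-aligned, 4096 entries */
{
        int i;

        for (i = 0; i < 4096; i++) {
                /* bits [31:20] = section base, AP=3 at bits [11:10],
                 * 0x12 = section descriptor with caching disabled */
                ttb[i] = ((u32)i << 20) | (3 << 10) | 0x12;
        }
}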
+ */ +static void cache_disable(uint32_t cache_bit) __attribute__ ((optimize(2))); + +/* cache_bit must be either CR_I or CR_C */ +static void cache_disable(uint32_t cache_bit) +{ + uint32_t reg; + + if (cache_bit == CR_C) { + /* if cache isn;t enabled no need to disable */ + reg = get_cr(); + if ((reg & CR_C) != CR_C) + return; + /* if disabling data cache, disable mmu too */ + cache_bit |= CR_M; + } + reg = get_cr(); + cp_delay(); + if (cache_bit == (CR_C | CR_M)) + flush_dcache_all(); + set_cr(reg & ~cache_bit); +} +#endif + +#ifdef CONFIG_SYS_ICACHE_OFF +void icache_enable (void) +{ + return; +} + +void icache_disable (void) +{ + return; +} + +int icache_status (void) +{ + return 0; /* always off */ +} +#else +void icache_enable(void) +{ + cache_enable(CR_I); +} + +void icache_disable(void) +{ + cache_disable(CR_I); +} + +int icache_status(void) +{ + return (get_cr() & CR_I) != 0; +} +#endif + +#ifdef CONFIG_SYS_DCACHE_OFF +void dcache_enable (void) +{ + return; +} + +void dcache_disable (void) +{ + return; +} + +int dcache_status (void) +{ + return 0; /* always off */ +} +#else +void dcache_enable(void) +{ + cache_enable(CR_C); +} + +void dcache_disable(void) +{ + cache_disable(CR_C); +} + +int dcache_status(void) +{ + return (get_cr() & CR_C) != 0; +} +#endif diff --git a/src/arch/armv7/lib/cache_v7.c b/src/arch/armv7/lib/cache_v7.c new file mode 100644 index 0000000000..13bb59f025 --- /dev/null +++ b/src/arch/armv7/lib/cache_v7.c @@ -0,0 +1,427 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, + * Aneesh V + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#include +#include +#include +#include +#include + +#define ARMV7_DCACHE_INVAL_ALL 1 +#define ARMV7_DCACHE_CLEAN_INVAL_ALL 2 +#define ARMV7_DCACHE_INVAL_RANGE 3 +#define ARMV7_DCACHE_CLEAN_INVAL_RANGE 4 + +#ifndef CONFIG_SYS_DCACHE_OFF +/* + * Write the level and type you want to Cache Size Selection Register(CSSELR) + * to get size details from Current Cache Size ID Register(CCSIDR) + */ +static void set_csselr(u32 level, u32 type) +{ u32 csselr = level << 1 | type; + + /* Write to Cache Size Selection Register(CSSELR) */ + asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); +} + +static u32 get_ccsidr(void) +{ + u32 ccsidr; + + /* Read current CP15 Cache Size ID Register */ + asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); + return ccsidr; +} + +static u32 get_clidr(void) +{ + u32 clidr; + + /* Read current CP15 Cache Level ID Register */ + asm volatile ("mrc p15,1,%0,c0,c0,1" : "=r" (clidr)); + return clidr; +} + +static void v7_inval_dcache_level_setway(u32 level, u32 num_sets, + u32 num_ways, u32 way_shift, + u32 log2_line_len) +{ + int way, set, setway; + + /* + * For optimal assembly code: + * a. count down + * b. 
have bigger loop inside + */ + for (way = num_ways - 1; way >= 0 ; way--) { + for (set = num_sets - 1; set >= 0; set--) { + setway = (level << 1) | (set << log2_line_len) | + (way << way_shift); + /* Invalidate data/unified cache line by set/way */ + asm volatile (" mcr p15, 0, %0, c7, c6, 2" + : : "r" (setway)); + } + } + /* DSB to make sure the operation is complete */ + CP15DSB; +} + +static void v7_clean_inval_dcache_level_setway(u32 level, u32 num_sets, + u32 num_ways, u32 way_shift, + u32 log2_line_len) +{ + int way, set, setway; + + /* + * For optimal assembly code: + * a. count down + * b. have bigger loop inside + */ + for (way = num_ways - 1; way >= 0 ; way--) { + for (set = num_sets - 1; set >= 0; set--) { + setway = (level << 1) | (set << log2_line_len) | + (way << way_shift); + /* + * Clean & Invalidate data/unified + * cache line by set/way + */ + asm volatile (" mcr p15, 0, %0, c7, c14, 2" + : : "r" (setway)); + } + } + /* DSB to make sure the operation is complete */ + CP15DSB; +} + +static void v7_maint_dcache_level_setway(u32 level, u32 operation) +{ + u32 ccsidr; + u32 num_sets, num_ways, log2_line_len, log2_num_ways; + u32 way_shift; + + set_csselr(level, ARMV7_CSSELR_IND_DATA_UNIFIED); + + ccsidr = get_ccsidr(); + + log2_line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >> + CCSIDR_LINE_SIZE_OFFSET) + 2; + /* Converting from words to bytes */ + log2_line_len += 2; + + num_ways = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> + CCSIDR_ASSOCIATIVITY_OFFSET) + 1; + num_sets = ((ccsidr & CCSIDR_NUM_SETS_MASK) >> + CCSIDR_NUM_SETS_OFFSET) + 1; + /* + * According to ARMv7 ARM number of sets and number of ways need + * not be a power of 2 + */ + log2_num_ways = log_2_n_round_up(num_ways); + + way_shift = (32 - log2_num_ways); + if (operation == ARMV7_DCACHE_INVAL_ALL) { + v7_inval_dcache_level_setway(level, num_sets, num_ways, + way_shift, log2_line_len); + } else if (operation == ARMV7_DCACHE_CLEAN_INVAL_ALL) { + v7_clean_inval_dcache_level_setway(level, num_sets, num_ways, + way_shift, log2_line_len); + } +} + +static void v7_maint_dcache_all(u32 operation) +{ + u32 level, cache_type, level_start_bit = 0; + + u32 clidr = get_clidr(); + + for (level = 0; level < 7; level++) { + cache_type = (clidr >> level_start_bit) & 0x7; + if ((cache_type == ARMV7_CLIDR_CTYPE_DATA_ONLY) || + (cache_type == ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA) || + (cache_type == ARMV7_CLIDR_CTYPE_UNIFIED)) + v7_maint_dcache_level_setway(level, operation); + level_start_bit += 3; + } +} + +static void v7_dcache_clean_inval_range(u32 start, + u32 stop, u32 line_len) +{ + u32 mva; + + /* Align start to cache line boundary */ + start &= ~(line_len - 1); + for (mva = start; mva < stop; mva = mva + line_len) { + /* DCCIMVAC - Clean & Invalidate data cache by MVA to PoC */ + asm volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" (mva)); + } +} + +static void v7_dcache_inval_range(u32 start, u32 stop, u32 line_len) +{ + u32 mva; + + /* + * If start address is not aligned to cache-line do not + * invalidate the first cache-line + */ + if (start & (line_len - 1)) { + printk(BIOS_ERR, "%s - start address is not aligned - 0x%08x\n", + __func__, start); + /* move to next cache line */ + start = (start + line_len - 1) & ~(line_len - 1); + } + + /* + * If stop address is not aligned to cache-line do not + * invalidate the last cache-line + */ + if (stop & (line_len - 1)) { + printk(BIOS_ERR, "%s - stop address is not aligned - 0x%08x\n", + __func__, stop); + /* align to the beginning of this cache line */ + stop &= ~(line_len - 1); + } 
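Both the set/way maintenance above and dcache_get_line_size() further down decode the CCSIDR LineSize field the same way: the field encodes log2(words per line) minus 2. A worked illustration (example values, not specific to any one core; the helper name is invented):

/* Illustrative decode of the CCSIDR LineSize field:
 *   words per line = 1 << (field + 2)
 *   bytes per line = 1 << (field + 4)
 * e.g. field 1 -> 8 words = 32-byte lines, field 2 -> 64-byte lines. */
static inline u32 example_line_size_bytes(u32 ccsidr)
{
        u32 field = (ccsidr & CCSIDR_LINE_SIZE_MASK) >> CCSIDR_LINE_SIZE_OFFSET;

        return 1u << (field + 4);
}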
+ + for (mva = start; mva < stop; mva = mva + line_len) { + /* DCIMVAC - Invalidate data cache by MVA to PoC */ + asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (mva)); + } +} + +static void v7_dcache_maint_range(u32 start, u32 stop, u32 range_op) +{ + u32 line_len = dcache_get_line_size(); + + switch (range_op) { + case ARMV7_DCACHE_CLEAN_INVAL_RANGE: + v7_dcache_clean_inval_range(start, stop, line_len); + break; + case ARMV7_DCACHE_INVAL_RANGE: + v7_dcache_inval_range(start, stop, line_len); + break; + } + + /* DSB to make sure the operation is complete */ + CP15DSB; +} + +/* Invalidate TLB */ +static void v7_inval_tlb(void) +{ + /* Invalidate entire unified TLB */ + asm volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0)); + /* Invalidate entire data TLB */ + asm volatile ("mcr p15, 0, %0, c8, c6, 0" : : "r" (0)); + /* Invalidate entire instruction TLB */ + asm volatile ("mcr p15, 0, %0, c8, c5, 0" : : "r" (0)); + /* Full system DSB - make sure that the invalidation is complete */ + CP15DSB; + /* Full system ISB - make sure the instruction stream sees it */ + CP15ISB; +} + +ulong dcache_get_line_size(void) +{ + u32 line_len, ccsidr; + + ccsidr = get_ccsidr(); + line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >> + CCSIDR_LINE_SIZE_OFFSET) + 2; + /* Converting from words to bytes */ + line_len += 2; + /* converting from log2(linelen) to linelen */ + line_len = 1 << line_len; + + return line_len; +} + +void invalidate_dcache_all(void) +{ + v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL); + + v7_outer_cache_inval_all(); +} + +/* + * Performs a clean & invalidation of the entire data cache + * at all levels + */ +void flush_dcache_all(void) +{ + v7_maint_dcache_all(ARMV7_DCACHE_CLEAN_INVAL_ALL); + + v7_outer_cache_flush_all(); +} + +/* + * Invalidates range in all levels of D-cache/unified cache used: + * Affects the range [start, stop - 1] + */ +void invalidate_dcache_range(unsigned long start, unsigned long stop) +{ + + v7_dcache_maint_range(start, stop, ARMV7_DCACHE_INVAL_RANGE); + + v7_outer_cache_inval_range(start, stop); +} + +/* + * Flush range(clean & invalidate) from all levels of D-cache/unified + * cache used: + * Affects the range [start, stop - 1] + */ +void flush_dcache_range(unsigned long start, unsigned long stop) +{ + v7_dcache_maint_range(start, stop, ARMV7_DCACHE_CLEAN_INVAL_RANGE); + + v7_outer_cache_flush_range(start, stop); +} + +void arm_init_before_mmu(void) +{ + v7_outer_cache_enable(); + invalidate_dcache_all(); + v7_inval_tlb(); +} + +void mmu_page_table_flush(unsigned long start, unsigned long stop) +{ + flush_dcache_range(start, stop); + v7_inval_tlb(); +} + +/* + * Flush range from all levels of d-cache/unified-cache used: + * Affects the range [start, start + size - 1] + */ +void flush_cache(unsigned long start, unsigned long size) +{ + flush_dcache_range(start, start + size); +} +#else /* #ifndef CONFIG_SYS_DCACHE_OFF */ +ulong dcache_get_line_size(void) +{ + return 0; +} + +void invalidate_dcache_all(void) +{ +} + +void flush_dcache_all(void) +{ +} + +void invalidate_dcache_range(unsigned long start, unsigned long stop) +{ +} + +void flush_dcache_range(unsigned long start, unsigned long stop) +{ +} + +void arm_init_before_mmu(void) +{ +} + +void flush_cache(unsigned long start, unsigned long size) +{ +} + +void mmu_page_table_flush(unsigned long start, unsigned long stop) +{ +} + +#endif /* #ifndef CONFIG_SYS_DCACHE_OFF */ + +#ifndef CONFIG_SYS_ICACHE_OFF +/* Invalidate entire I-cache and branch predictor array */ +void invalidate_icache_all(void) +{ + /* + * 
Invalidate all instruction caches to PoU. + * Also flushes branch target cache. + */ + asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); + + /* Invalidate entire branch predictor array */ + asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); + + /* Full system DSB - make sure that the invalidation is complete */ + CP15DSB; + + /* ISB - make sure the instruction stream sees it */ + CP15ISB; +} +#else +void invalidate_icache_all(void) +{ +} +#endif + +/* + * FIXME(dhendrix): had unexplainable compilation failure of weak symbols + * (in spite of having prototypes and whatnot)... ron's advice is "death + * to weak symbols!" + */ +#if 0 +/* + * Stub implementations for outer cache operations + */ +void __v7_outer_cache_enable(void) +{ +} +void v7_outer_cache_enable(void) + __attribute__((weak, alias("__v7_outer_cache_enable"))); + +void __v7_outer_cache_disable(void) +{ +} +void v7_outer_cache_disable(void) + __attribute__((weak, alias("__v7_outer_cache_disable"))); + +void __v7_outer_cache_flush_all(void) +{ +} +void v7_outer_cache_flush_all(void) + __attribute__((weak, alias("__v7_outer_cache_flush_all"))); + +void __v7_outer_cache_inval_all(void) +{ +} +void v7_outer_cache_inval_all(void) + __attribute__((weak, alias("__v7_outer_cache_inval_all"))); + +void __v7_outer_cache_flush_range(u32 start, u32 end) +{ +} +void v7_outer_cache_flush_range(u32 start, u32 end) + __attribute__((weak, alias("__v7_outer_cache_flush_range"))); + +void __v7_outer_cache_inval_range(u32 start, u32 end) +{ +} +void v7_outer_cache_inval_range(u32 start, u32 end) + __attribute__((weak, alias("__v7_outer_cache_inval_range"))); +#endif diff --git a/src/arch/armv7/lib/div.c b/src/arch/armv7/lib/div.c new file mode 100644 index 0000000000..03352dc7a5 --- /dev/null +++ b/src/arch/armv7/lib/div.c @@ -0,0 +1,5 @@ +void __div0(void); // called from asm so no need for a prototype in a header +void __div0(void) +{ + // div by zero +} diff --git a/src/arch/armv7/lib/div0.c b/src/arch/armv7/lib/div0.c new file mode 100644 index 0000000000..632c247b5c --- /dev/null +++ b/src/arch/armv7/lib/div0.c @@ -0,0 +1,32 @@ +/* + * (C) Copyright 2002 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +void __div0(void); // called from asm so no need for a prototype in a header + +/* Replacement (=dummy) for GNU/Linux division-by zero handler */ +void __div0 (void) +{ + extern void hang (void); + + hang(); +} diff --git a/src/arch/armv7/lib/div64.S b/src/arch/armv7/lib/div64.S new file mode 100644 index 0000000000..44edf480ca --- /dev/null +++ b/src/arch/armv7/lib/div64.S @@ -0,0 +1,208 @@ +/* + * linux/arch/arm/lib/div64.S + * + * Optimized computation of 64-bit dividend / 32-bit divisor + * + * Author: Nicolas Pitre + * Created: Oct 5, 2003 + * Copyright: Monta Vista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +// FIXME +//#include +#define __LINUX_ARM_ARCH__ 7 + +#ifdef __ARMEB__ +#define xh r0 +#define xl r1 +#define yh r2 +#define yl r3 +#else +#define xl r0 +#define xh r1 +#define yl r2 +#define yh r3 +#endif + +/* + * __do_div64: perform a division with 64-bit dividend and 32-bit divisor. + * + * Note: Calling convention is totally non standard for optimal code. + * This is meant to be used by do_div() from include/asm/div64.h only. + * + * Input parameters: + * xh-xl = dividend (clobbered) + * r4 = divisor (preserved) + * + * Output values: + * yh-yl = result + * xh = remainder + * + * Clobbered regs: xl, ip + */ + + +.globl __do_div64; +.align 4,0x90 +__do_div64: + + @ Test for easy paths first. + subs ip, r4, #1 + bls 9f @ divisor is 0 or 1 + tst ip, r4 + beq 8f @ divisor is power of 2 + + @ See if we need to handle upper 32-bit result. + cmp xh, r4 + mov yh, #0 + blo 3f + + @ Align divisor with upper part of dividend. + @ The aligned divisor is stored in yl preserving the original. + @ The bit position is stored in ip. + +#if __LINUX_ARM_ARCH__ >= 5 + + clz yl, r4 + clz ip, xh + sub yl, yl, ip + mov ip, #1 + mov ip, ip, lsl yl + mov yl, r4, lsl yl + +#else + + mov yl, r4 + mov ip, #1 +1: cmp yl, #0x80000000 + cmpcc yl, xh + movcc yl, yl, lsl #1 + movcc ip, ip, lsl #1 + bcc 1b + +#endif + + @ The division loop for needed upper bit positions. + @ Break out early if dividend reaches 0. +2: cmp xh, yl + orrcs yh, yh, ip + subcss xh, xh, yl + movnes ip, ip, lsr #1 + mov yl, yl, lsr #1 + bne 2b + + @ See if we need to handle lower 32-bit result. +3: cmp xh, #0 + mov yl, #0 + cmpeq xl, r4 + movlo xh, xl + movlo pc, lr + + @ The division loop for lower bit positions. + @ Here we shift remainer bits leftwards rather than moving the + @ divisor for comparisons, considering the carry-out bit as well. + mov ip, #0x80000000 +4: movs xl, xl, lsl #1 + adcs xh, xh, xh + beq 6f + cmpcc xh, r4 +5: orrcs yl, yl, ip + subcs xh, xh, r4 + movs ip, ip, lsr #1 + bne 4b + mov pc, lr + + @ The top part of remainder became zero. If carry is set + @ (the 33th bit) this is a false positive so resume the loop. + @ Otherwise, if lower part is also null then we are done. +6: bcs 5b + cmp xl, #0 + moveq pc, lr + + @ We still have remainer bits in the low part. Bring them up. + +#if __LINUX_ARM_ARCH__ >= 5 + + clz xh, xl @ we know xh is zero here so... + add xh, xh, #1 + mov xl, xl, lsl xh + mov ip, ip, lsr xh + +#else + +7: movs xl, xl, lsl #1 + mov ip, ip, lsr #1 + bcc 7b + +#endif + + @ Current remainder is now 1. 
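As the header comment of div64.S above notes, __do_div64 uses a non-standard calling convention on behalf of do_div(): the 64-bit dividend goes in xh:xl, the 32-bit divisor in r4, the quotient comes back in yh:yl and the remainder in xh. Functionally it behaves like this illustrative C helper (not part of the patch; the name is invented):

/* Illustrative: what do_div()/__do_div64 compute, in plain C. */
static u32 example_do_div(u64 *n, u32 base)
{
        u32 rem = (u32)(*n % base);     /* ends up in xh */

        *n /= base;                     /* quotient returned in yh:yl */
        return rem;
}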
It is worthless to compare with + @ divisor at this point since divisor can not be smaller than 3 here. + @ If possible, branch for another shift in the division loop. + @ If no bit position left then we are done. + movs ip, ip, lsr #1 + mov xh, #1 + bne 4b + mov pc, lr + +8: @ Division by a power of 2: determine what that divisor order is + @ then simply shift values around + +#if __LINUX_ARM_ARCH__ >= 5 + + clz ip, r4 + rsb ip, ip, #31 + +#else + + mov yl, r4 + cmp r4, #(1 << 16) + mov ip, #0 + movhs yl, yl, lsr #16 + movhs ip, #16 + + cmp yl, #(1 << 8) + movhs yl, yl, lsr #8 + addhs ip, ip, #8 + + cmp yl, #(1 << 4) + movhs yl, yl, lsr #4 + addhs ip, ip, #4 + + cmp yl, #(1 << 2) + addhi ip, ip, #3 + addls ip, ip, yl, lsr #1 + +#endif + + mov yh, xh, lsr ip + mov yl, xl, lsr ip + rsb ip, ip, #32 + orr yl, yl, xh, lsl ip + mov xh, xl, lsl ip + mov xh, xh, lsr ip + mov pc, lr + + @ eq -> division by 1: obvious enough... +9: moveq yl, xl + moveq yh, xh + moveq xh, #0 + moveq pc, lr + + @ Division by 0: + str lr, [sp, #-8]! + bl __div0 + + @ as wrong as it could be... + mov yl, #0 + mov yh, #0 + mov xh, #0 + ldr pc, [sp], #8 + +@.type __do_div64, @function; +@.size __do_div64, .-__do_div64, + diff --git a/src/arch/armv7/lib/eabi_compat.c b/src/arch/armv7/lib/eabi_compat.c new file mode 100644 index 0000000000..5f59892432 --- /dev/null +++ b/src/arch/armv7/lib/eabi_compat.c @@ -0,0 +1,33 @@ +/* + * Utility functions needed for (some) EABI conformant tool chains. + * + * (C) Copyright 2009 Wolfgang Denk + * + * This program is Free Software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#include + +/* FIXME(dhendrix): prototypes added for assembler */ +int raise (int signum); +int raise (int signum) +{ +#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) + printf("raise: Signal # %d caught\n", signum); +#endif + return 0; +} + +/* Dummy function to avoid linker complaints */ +void __aeabi_unwind_cpp_pr0(void); +void __aeabi_unwind_cpp_pr0(void) +{ +}; + +void __aeabi_unwind_cpp_pr1(void); +void __aeabi_unwind_cpp_pr1(void) +{ +}; diff --git a/src/arch/armv7/lib/hang_spl.c b/src/arch/armv7/lib/hang_spl.c new file mode 100644 index 0000000000..630a57af47 --- /dev/null +++ b/src/arch/armv7/lib/hang_spl.c @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2012 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ */ + +#include + +void hang(void) +{ + for (;;); +} diff --git a/src/arch/armv7/lib/id.inc b/src/arch/armv7/lib/id.inc new file mode 100644 index 0000000000..4da7024233 --- /dev/null +++ b/src/arch/armv7/lib/id.inc @@ -0,0 +1,18 @@ + .section ".id", "a", %progbits + + .globl __id_start +__id_start: +ver: + .asciz COREBOOT_VERSION +vendor: + .asciz CONFIG_MAINBOARD_VENDOR +part: + .asciz CONFIG_MAINBOARD_PART_NUMBER +.long __id_end + CONFIG_ID_SECTION_OFFSET - ver /* Reverse offset to the vendor id */ +.long __id_end + CONFIG_ID_SECTION_OFFSET - vendor /* Reverse offset to the vendor id */ +.long __id_end + CONFIG_ID_SECTION_OFFSET - part /* Reverse offset to the part number */ +.long CONFIG_ROM_SIZE /* Size of this romimage */ + .globl __id_end + +__id_end: +.previous diff --git a/src/arch/armv7/lib/id.lds b/src/arch/armv7/lib/id.lds new file mode 100644 index 0000000000..9e31ee615c --- /dev/null +++ b/src/arch/armv7/lib/id.lds @@ -0,0 +1,6 @@ +SECTIONS { + . = (0x100000000 - CONFIG_ID_SECTION_OFFSET) - (__id_end - __id_start); + .id (.): { + *(.id) + } +} diff --git a/src/arch/armv7/lib/interrupts.c b/src/arch/armv7/lib/interrupts.c new file mode 100644 index 0000000000..6ee3f267ef --- /dev/null +++ b/src/arch/armv7/lib/interrupts.c @@ -0,0 +1,198 @@ +/* + * (C) Copyright 2003 + * Texas Instruments + * + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Marius Groeger + * + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Alex Zuepke + * + * (C) Copyright 2002-2004 + * Gary Jennejohn, DENX Software Engineering, + * + * (C) Copyright 2004 + * Philippe Robin, ARM Ltd. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include +//#include + +DECLARE_GLOBAL_DATA_PTR; + +#ifdef CONFIG_USE_IRQ +int interrupt_init (void) +{ + /* + * setup up stacks if necessary + */ + IRQ_STACK_START = gd->irq_sp - 4; + IRQ_STACK_START_IN = gd->irq_sp + 8; + FIQ_STACK_START = IRQ_STACK_START - CONFIG_STACKSIZE_IRQ; + + return arch_interrupt_init(); +} + +/* enable IRQ interrupts */ +void enable_interrupts (void) +{ + unsigned long temp; + __asm__ __volatile__("mrs %0, cpsr\n" + "bic %0, %0, #0x80\n" + "msr cpsr_c, %0" + : "=r" (temp) + : + : "memory"); +} + + +/* + * disable IRQ/FIQ interrupts + * returns true if interrupts had been enabled before we disabled them + */ +int disable_interrupts (void) +{ + unsigned long old,temp; + __asm__ __volatile__("mrs %0, cpsr\n" + "orr %1, %0, #0xc0\n" + "msr cpsr_c, %1" + : "=r" (old), "=r" (temp) + : + : "memory"); + return (old & 0x80) == 0; +} +#else +int interrupt_init (void) +{ + /* + * setup up stacks if necessary + */ + IRQ_STACK_START_IN = gd->irq_sp + 8; + + return 0; +} + +void enable_interrupts (void) +{ + return; +} +int disable_interrupts (void) +{ + return 0; +} +#endif + + +void bad_mode (void) +{ + panic ("Resetting CPU ...\n"); + reset_cpu (0); +} + +void show_regs (struct pt_regs *regs) +{ + unsigned long flags; + const char *processor_modes[] = { + "USER_26", "FIQ_26", "IRQ_26", "SVC_26", + "UK4_26", "UK5_26", "UK6_26", "UK7_26", + "UK8_26", "UK9_26", "UK10_26", "UK11_26", + "UK12_26", "UK13_26", "UK14_26", "UK15_26", + "USER_32", "FIQ_32", "IRQ_32", "SVC_32", + "UK4_32", "UK5_32", "UK6_32", "ABT_32", + "UK8_32", "UK9_32", "UK10_32", "UND_32", + "UK12_32", "UK13_32", "UK14_32", "SYS_32", + }; + + flags = condition_codes (regs); + + printf ("pc : [<%08lx>] lr : [<%08lx>]\n" + "sp : %08lx ip : %08lx fp : %08lx\n", + instruction_pointer (regs), + regs->ARM_lr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); + printf ("r10: %08lx r9 : %08lx r8 : %08lx\n", + regs->ARM_r10, regs->ARM_r9, regs->ARM_r8); + printf ("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4); + printf ("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0); + printf ("Flags: %c%c%c%c", + flags & CC_N_BIT ? 'N' : 'n', + flags & CC_Z_BIT ? 'Z' : 'z', + flags & CC_C_BIT ? 'C' : 'c', flags & CC_V_BIT ? 'V' : 'v'); + printf (" IRQs %s FIQs %s Mode %s%s\n", + interrupts_enabled (regs) ? "on" : "off", + fast_interrupts_enabled (regs) ? "on" : "off", + processor_modes[processor_mode (regs)], + thumb_mode (regs) ? 
" (T)" : ""); +} + +void do_undefined_instruction (struct pt_regs *pt_regs) +{ + printf ("undefined instruction\n"); + show_regs (pt_regs); + bad_mode (); +} + +void do_software_interrupt (struct pt_regs *pt_regs) +{ + printf ("software interrupt\n"); + show_regs (pt_regs); + bad_mode (); +} + +void do_prefetch_abort (struct pt_regs *pt_regs) +{ + printf ("prefetch abort\n"); + show_regs (pt_regs); + bad_mode (); +} + +void do_data_abort (struct pt_regs *pt_regs) +{ + printf ("data abort\n"); + show_regs (pt_regs); + bad_mode (); +} + +void do_not_used (struct pt_regs *pt_regs) +{ + printf ("not used\n"); + show_regs (pt_regs); + bad_mode (); +} + +void do_fiq (struct pt_regs *pt_regs) +{ + printf ("fast interrupt request\n"); + show_regs (pt_regs); + bad_mode (); +} + +#ifndef CONFIG_USE_IRQ +void do_irq (struct pt_regs *pt_regs) +{ + printf ("interrupt request\n"); + show_regs (pt_regs); + bad_mode (); +} +#endif diff --git a/src/arch/armv7/lib/memcpy.S b/src/arch/armv7/lib/memcpy.S new file mode 100644 index 0000000000..f655256b5d --- /dev/null +++ b/src/arch/armv7/lib/memcpy.S @@ -0,0 +1,243 @@ +/* + * linux/arch/arm/lib/memcpy.S + * + * Author: Nicolas Pitre + * Created: Sep 28, 2005 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#define W(instr) instr + +#define LDR1W_SHIFT 0 +#define STR1W_SHIFT 0 + + .macro ldr1w ptr reg abort + W(ldr) \reg, [\ptr], #4 + .endm + + .macro ldr4w ptr reg1 reg2 reg3 reg4 abort + ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4} + .endm + + .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort + ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8} + .endm + + .macro ldr1b ptr reg cond=al abort + ldr\cond\()b \reg, [\ptr], #1 + .endm + + .macro str1w ptr reg abort + W(str) \reg, [\ptr], #4 + .endm + + .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort + stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8} + .endm + + .macro str1b ptr reg cond=al abort + str\cond\()b \reg, [\ptr], #1 + .endm + + .macro enter reg1 reg2 + stmdb sp!, {r0, \reg1, \reg2} + .endm + + .macro exit reg1 reg2 + ldmfd sp!, {r0, \reg1, \reg2} + .endm + + .text + +/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ + +.globl memcpy +memcpy: + + cmp r0, r1 + moveq pc, lr + + enter r4, lr + + subs r2, r2, #4 + blt 8f + ands ip, r0, #3 + PLD( pld [r1, #0] ) + bne 9f + ands ip, r1, #3 + bne 10f + +1: subs r2, r2, #(28) + stmfd sp!, {r5 - r8} + blt 5f + + CALGN( ands ip, r0, #31 ) + CALGN( rsb r3, ip, #32 ) + CALGN( sbcnes r4, r3, r2 ) @ C is always set here + CALGN( bcs 2f ) + CALGN( adr r4, 6f ) + CALGN( subs r2, r2, r3 ) @ C gets set + CALGN( add pc, r4, ip ) + + PLD( pld [r1, #0] ) +2: PLD( subs r2, r2, #96 ) + PLD( pld [r1, #28] ) + PLD( blt 4f ) + PLD( pld [r1, #60] ) + PLD( pld [r1, #92] ) + +3: PLD( pld [r1, #124] ) +4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f + subs r2, r2, #32 + str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f + bge 3b + PLD( cmn r2, #96 ) + PLD( bge 4b ) + +5: ands ip, r2, #28 + rsb ip, ip, #32 +#if LDR1W_SHIFT > 0 + lsl ip, ip, #LDR1W_SHIFT +#endif + addne pc, pc, ip @ C is always clear here + b 7f +6: + .rept (1 << LDR1W_SHIFT) + W(nop) + .endr + ldr1w r1, r3, abort=20f + ldr1w r1, r4, abort=20f + ldr1w r1, r5, abort=20f + ldr1w r1, r6, abort=20f + ldr1w r1, r7, abort=20f + 
ldr1w r1, r8, abort=20f + ldr1w r1, lr, abort=20f + +#if LDR1W_SHIFT < STR1W_SHIFT + lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT +#elif LDR1W_SHIFT > STR1W_SHIFT + lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT +#endif + add pc, pc, ip + nop + .rept (1 << STR1W_SHIFT) + W(nop) + .endr + str1w r0, r3, abort=20f + str1w r0, r4, abort=20f + str1w r0, r5, abort=20f + str1w r0, r6, abort=20f + str1w r0, r7, abort=20f + str1w r0, r8, abort=20f + str1w r0, lr, abort=20f + + CALGN( bcs 2b ) + +7: ldmfd sp!, {r5 - r8} + +8: movs r2, r2, lsl #31 + ldr1b r1, r3, ne, abort=21f + ldr1b r1, r4, cs, abort=21f + ldr1b r1, ip, cs, abort=21f + str1b r0, r3, ne, abort=21f + str1b r0, r4, cs, abort=21f + str1b r0, ip, cs, abort=21f + + exit r4, pc + +9: rsb ip, ip, #4 + cmp ip, #2 + ldr1b r1, r3, gt, abort=21f + ldr1b r1, r4, ge, abort=21f + ldr1b r1, lr, abort=21f + str1b r0, r3, gt, abort=21f + str1b r0, r4, ge, abort=21f + subs r2, r2, ip + str1b r0, lr, abort=21f + blt 8b + ands ip, r1, #3 + beq 1b + +10: bic r1, r1, #3 + cmp ip, #2 + ldr1w r1, lr, abort=21f + beq 17f + bgt 18f + + + .macro forward_copy_shift pull push + + subs r2, r2, #28 + blt 14f + + CALGN( ands ip, r0, #31 ) + CALGN( rsb ip, ip, #32 ) + CALGN( sbcnes r4, ip, r2 ) @ C is always set here + CALGN( subcc r2, r2, ip ) + CALGN( bcc 15f ) + +11: stmfd sp!, {r5 - r9} + + PLD( pld [r1, #0] ) + PLD( subs r2, r2, #96 ) + PLD( pld [r1, #28] ) + PLD( blt 13f ) + PLD( pld [r1, #60] ) + PLD( pld [r1, #92] ) + +12: PLD( pld [r1, #124] ) +13: ldr4w r1, r4, r5, r6, r7, abort=19f + mov r3, lr, pull #\pull + subs r2, r2, #32 + ldr4w r1, r8, r9, ip, lr, abort=19f + orr r3, r3, r4, push #\push + mov r4, r4, pull #\pull + orr r4, r4, r5, push #\push + mov r5, r5, pull #\pull + orr r5, r5, r6, push #\push + mov r6, r6, pull #\pull + orr r6, r6, r7, push #\push + mov r7, r7, pull #\pull + orr r7, r7, r8, push #\push + mov r8, r8, pull #\pull + orr r8, r8, r9, push #\push + mov r9, r9, pull #\pull + orr r9, r9, ip, push #\push + mov ip, ip, pull #\pull + orr ip, ip, lr, push #\push + str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f + bge 12b + PLD( cmn r2, #96 ) + PLD( bge 13b ) + + ldmfd sp!, {r5 - r9} + +14: ands ip, r2, #28 + beq 16f + +15: mov r3, lr, pull #\pull + ldr1w r1, lr, abort=21f + subs ip, ip, #4 + orr r3, r3, lr, push #\push + str1w r0, r3, abort=21f + bgt 15b + CALGN( cmp r2, #0 ) + CALGN( bge 11b ) + +16: sub r1, r1, #(\push / 8) + b 8b + + .endm + + + forward_copy_shift pull=8 push=24 + +17: forward_copy_shift pull=16 push=16 + +18: forward_copy_shift pull=24 push=8 diff --git a/src/arch/armv7/lib/memset.S b/src/arch/armv7/lib/memset.S new file mode 100644 index 0000000000..0cdf89535a --- /dev/null +++ b/src/arch/armv7/lib/memset.S @@ -0,0 +1,126 @@ +/* + * linux/arch/arm/lib/memset.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include + + .text + .align 5 + .word 0 + +1: subs r2, r2, #4 @ 1 do we have enough + blt 5f @ 1 bytes to align with? + cmp r3, #2 @ 1 + strltb r1, [r0], #1 @ 1 + strleb r1, [r0], #1 @ 1 + strb r1, [r0], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) +/* + * The pointer is now aligned and the length is adjusted. Try doing the + * memset again. + */ + +.globl memset +memset: + ands r3, r0, #3 @ 1 unaligned? + bne 1b @ 1 +/* + * we know that the pointer in r0 is aligned to a word boundary. 
+ */ + orr r1, r1, r1, lsl #8 + orr r1, r1, r1, lsl #16 + mov r3, r1 + cmp r2, #16 + blt 4f + +#if ! CALGN(1)+0 + +/* + * We need an extra register for this loop - save the return address and + * use the LR + */ + str lr, [sp, #-4]! + mov ip, r1 + mov lr, r1 + +2: subs r2, r2, #64 + stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + bgt 2b + ldmeqfd sp!, {pc} @ Now <64 bytes to go. +/* + * No need to correct the count; we're only testing bits from now on + */ + tst r2, #32 + stmneia r0!, {r1, r3, ip, lr} + stmneia r0!, {r1, r3, ip, lr} + tst r2, #16 + stmneia r0!, {r1, r3, ip, lr} + ldr lr, [sp], #4 + +#else + +/* + * This version aligns the destination pointer in order to write + * whole cache lines at once. + */ + + stmfd sp!, {r4-r7, lr} + mov r4, r1 + mov r5, r1 + mov r6, r1 + mov r7, r1 + mov ip, r1 + mov lr, r1 + + cmp r2, #96 + tstgt r0, #31 + ble 3f + + and ip, r0, #31 + rsb ip, ip, #32 + sub r2, r2, ip + movs ip, ip, lsl #(32 - 4) + stmcsia r0!, {r4, r5, r6, r7} + stmmiia r0!, {r4, r5} + tst ip, #(1 << 30) + mov ip, r1 + strne r1, [r0], #4 + +3: subs r2, r2, #64 + stmgeia r0!, {r1, r3-r7, ip, lr} + stmgeia r0!, {r1, r3-r7, ip, lr} + bgt 3b + ldmeqfd sp!, {r4-r7, pc} + + tst r2, #32 + stmneia r0!, {r1, r3-r7, ip, lr} + tst r2, #16 + stmneia r0!, {r4-r7} + ldmfd sp!, {r4-r7, lr} + +#endif + +4: tst r2, #8 + stmneia r0!, {r1, r3} + tst r2, #4 + strne r1, [r0], #4 +/* + * When we get here, we've got less than 4 bytes to zero. We + * may have an unaligned pointer as well. + */ +5: tst r2, #2 + strneb r1, [r0], #1 + strneb r1, [r0], #1 + tst r2, #1 + strneb r1, [r0], #1 + mov pc, lr diff --git a/src/arch/armv7/lib/reset.c b/src/arch/armv7/lib/reset.c new file mode 100644 index 0000000000..08e6acb261 --- /dev/null +++ b/src/arch/armv7/lib/reset.c @@ -0,0 +1,53 @@ +/* + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Marius Groeger + * + * (C) Copyright 2002 + * Sysgo Real-Time Solutions, GmbH + * Alex Zuepke + * + * (C) Copyright 2002 + * Gary Jennejohn, DENX Software Engineering, + * + * (C) Copyright 2004 + * DAVE Srl + * http://www.dave-tech.it + * http://www.wawnet.biz + * mailto:info@wawnet.biz + * + * (C) Copyright 2004 Texas Insturments + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
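As an aside (an illustrative sketch, not part of the patch), the first three instructions of the aligned memset path above simply replicate the fill byte across a 32-bit word so that whole words, and later whole 16- or 32-byte bursts, can be stored at once. In C, assuming an already word-aligned destination:

#include <stdint.h>
#include <stddef.h>

static void fill_words(uint32_t *dst, uint8_t c, size_t nwords)
{
	/* replicate the byte into all four byte lanes, like the
	 * "orr r1, r1, r1, lsl #8 / lsl #16" pair above */
	uint32_t v = c;
	v |= v << 8;
	v |= v << 16;

	while (nwords--)
		*dst++ = v;
}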
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include + +int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) +{ + puts ("resetting ...\n"); + + udelay (50000); /* wait 50 ms */ + + disable_interrupts(); + reset_cpu(0); + + /*NOTREACHED*/ + return 0; +} diff --git a/src/arch/armv7/lib/romstage_console.c b/src/arch/armv7/lib/romstage_console.c new file mode 100644 index 0000000000..ead26f3521 --- /dev/null +++ b/src/arch/armv7/lib/romstage_console.c @@ -0,0 +1,75 @@ +/* + * This file is part of the coreboot project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of + * the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + */ + +#include +#include +#if CONFIG_SERIAL_CONSOLE +#include +#endif +#if CONFIG_USBDEBUG +#include +#endif + +/* FIXME: need to make console driver more generic */ +void console_tx_byte(unsigned char byte) +{ + if (byte == '\n') + console_tx_byte('\r'); + +#if CONFIG_SERIAL_CONSOLE + uart_tx_byte(byte); +#endif +#if CONFIG_USBDEBUG + usbdebug_tx_byte(0, byte); +#endif +#if CONFIG_CONSOLE_CBMEM + cbmemc_tx_byte(byte); +#endif +} + +/* FIXME(dhendrix): add this back in */ +#if 0 +static void console_tx_flush(void) +{ +#if CONFIG_CONSOLE_SERIAL + uart_tx_flush(CONFIG_CONSOLE_SERIAL_UART_ADDRESS); +#endif +#if CONFIG_USBDEBUG + usbdebug_tx_flush(0); +#endif +} +#endif + +int do_printk(int msg_level, const char *fmt, ...) +{ + va_list args; + int i; + + if (msg_level > console_loglevel) { + return 0; + } + + va_start(args, fmt); + i = vtxprintf(console_tx_byte, fmt, args); + va_end(args); + +// console_tx_flush(); + + return i; +} diff --git a/src/arch/armv7/lib/syslib.c b/src/arch/armv7/lib/syslib.c new file mode 100644 index 0000000000..6f38bf9679 --- /dev/null +++ b/src/arch/armv7/lib/syslib.c @@ -0,0 +1,70 @@ +/* + * (C) Copyright 2008 + * Texas Instruments, + * + * Richard Woodruff + * Syed Mohammed Khasim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +//#include +#include +#include /* FIXME: dumping ground for prototypes */ + +/************************************************************ + * sdelay() - simple spin loop. 
Will be constant time as + * its generally used in bypass conditions only. This + * is necessary until timers are accessible. + * + * not inline to increase chances its in cache when called + *************************************************************/ +void sdelay(unsigned long loops) +{ + __asm__ volatile ("1:\n" "subs %0, %1, #1\n" + "bne 1b":"=r" (loops):"0"(loops)); +} + +/***************************************************************** + * sr32 - clear & set a value in a bit range for a 32 bit address + *****************************************************************/ +void sr32(void *addr, u32 start_bit, u32 num_bits, u32 value) +{ + u32 tmp, msk = 0; + msk = 1 << num_bits; + --msk; + tmp = readl((u32)addr) & ~(msk << start_bit); + tmp |= value << start_bit; + writel(tmp, (u32)addr); +} + +/********************************************************************* + * wait_on_value() - common routine to allow waiting for changes in + * volatile regs. + *********************************************************************/ +u32 wait_on_value(u32 read_bit_mask, u32 match_value, void *read_addr, + u32 bound) +{ + u32 i = 0, val; + do { + ++i; + val = readl((u32)read_addr) & read_bit_mask; + if (val == match_value) + return 1; + if (i == bound) + return 0; + } while (1); +} diff --git a/src/arch/armv7/romstage.ld b/src/arch/armv7/romstage.ld new file mode 100644 index 0000000000..0a2f7f2947 --- /dev/null +++ b/src/arch/armv7/romstage.ld @@ -0,0 +1,119 @@ +/* + * Memory map: + * + * CONFIG_RAMBASE : text segment + * : rodata segment + * : data segment + * : bss segment + * : stack + * : heap + */ +/* + * Bootstrap code for the STPC Consumer + * Copyright (c) 1999 by Net Insight AB. All Rights Reserved. + */ + +/* + * Written by Johan Rydberg, based on work by Daniel Kahlin. + * Rewritten by Eric Biederman + * 2005.12 yhlu add coreboot_ram cross the vga font buffer handling + */ + +/* We use ELF as output format. So that we can debug the code in some form. */ +/* + INCLUDE ldoptions + */ + +/* + * FIXME: what exactly should these be? maybe defined on a per-CPU basis? + * FIXME 2: Somehow linker didn't like CONFIG_SPL_MAX_SIZE and CONFIG_SPL_TEXT_BASE... + */ +/* MEMORY { .sram : ORIGIN = 0x02023400, LENGTH = 0x3800 } */ +MEMORY { .sram : ORIGIN = 0x02023400, LENGTH = 0x10000 } + +/* We use ELF as output format. So that we can debug the code in some form. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm") +OUTPUT_ARCH(arm) + +ENTRY(_start) + +SECTIONS +{ + . = ROMSTAGE_BASE; + + /* + .rom . : { + _rom = .; + *(.rom.text); + *(.rom.data); + *(.rodata); + *(.rodata.*); + *(.rom.data.*); + . = ALIGN(16); + _erom = .; + } + */ + + /* First we place the code and read only data (typically const declared). + * This could theoretically be placed in rom. + */ + .text : { + _text = .; + *(.text); + *(.text.*); + . = ALIGN(4); + _etext = .; + } >.sram + + .rodata : { + _rodata = .; + . = ALIGN(4); + cpu_drivers = . ; + *(.rodata.cpu_driver) + ecpu_drivers = . ; + *(.rodata) + *(.rodata.*) + /* kevinh/Ispiri - Added an align, because the objcopy tool + * incorrectly converts sections that are not long word aligned. + */ + . = ALIGN(4); + + _erodata = .; + } >.sram + /* After the code we place initialized data (typically initialized + * global variables). This gets copied into ram by startup code. + * __data_start and __data_end shows where in ram this should be placed, + * whereas __data_loadstart and __data_loadend shows where in rom to + * copy from. 
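To make the two register helpers above concrete, here is a hypothetical caller (illustrative only; the register addresses and bit positions are invented, not taken from any real SoC): it programs a 4-bit field with sr32() and then polls a ready bit with wait_on_value().

#define EXAMPLE_CLK_REG  ((void *)0x10020000)	/* invented address */
#define EXAMPLE_STAT_REG ((void *)0x10020004)	/* invented address */

static int example_set_divider(u32 div)
{
	/* replace bits [11:8] of the clock register with 'div' */
	sr32(EXAMPLE_CLK_REG, 8, 4, div);

	/* poll bit 0 of the status register for up to 1000 reads */
	if (!wait_on_value(1 << 0, 1 << 0, EXAMPLE_STAT_REG, 1000))
		return -1;	/* timed out */
	return 0;
}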
+ */ + .data : { + _data = .; + *(.data) + _edata = .; + } >.sram + + __image_copy_end = .; + + /* bss does not contain data, it is just a space that should be zero + * initialized on startup. (typically uninitialized global variables) + * crt0.S fills between _bss and _ebss with zeroes. + */ + .bss . : { + . = ALIGN(4); + _bss = .; + *(.bss) + *(.sbss) + *(COMMON) + } >.sram + _ebss = .; + _end = .; + + /* Discard the sections we don't need/want */ + /DISCARD/ : { + *(.comment) + *(.note) + *(.comment.*) + *(.note.*) + *(.eh_frame); + } +} diff --git a/src/arch/armv7/start.S b/src/arch/armv7/start.S new file mode 100644 index 0000000000..63cac4b98e --- /dev/null +++ b/src/arch/armv7/start.S @@ -0,0 +1,543 @@ +/* + * armboot - Startup Code for OMAP3530/ARM Cortex CPU-core + * + * Copyright (c) 2004 Texas Instruments + * + * Copyright (c) 2001 Marius Gröger + * Copyright (c) 2002 Alex Züpke + * Copyright (c) 2002 Gary Jennejohn + * Copyright (c) 2003 Richard Woodruff + * Copyright (c) 2003 Kshitij + * Copyright (c) 2006-2008 Syed Mohammed Khasim + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#define __ASSEMBLY__ +#include + +.globl _start +_start: b reset + ldr pc, _undefined_instruction + ldr pc, _software_interrupt + ldr pc, _prefetch_abort + ldr pc, _data_abort + ldr pc, _not_used + ldr pc, _irq + ldr pc, _fiq +#ifdef CONFIG_SPL_BUILD +_undefined_instruction: .word _undefined_instruction +_software_interrupt: .word _software_interrupt +_prefetch_abort: .word _prefetch_abort +_data_abort: .word _data_abort +_not_used: .word _not_used +_irq: .word _irq +_fiq: .word _fiq +_pad: .word 0x12345678 /* now 16*4=64 */ +#else +_undefined_instruction: .word undefined_instruction +_software_interrupt: .word software_interrupt +_prefetch_abort: .word prefetch_abort +_data_abort: .word data_abort +_not_used: .word not_used +_irq: .word irq +_fiq: .word fiq +_pad: .word 0x12345678 /* now 16*4=64 */ +#endif /* CONFIG_SPL_BUILD */ + +.global _end_vect +_end_vect: + + .balignl 16,0xdeadbeef +/************************************************************************* + * + * Startup Code (reset vector) + * + * do important init only if we don't start from memory! + * setup Memory and board specific bits prior to relocation. + * relocate armboot to ram + * setup stack + * + *************************************************************************/ + +.globl _TEXT_BASE +_TEXT_BASE: + .word CONFIG_SYS_TEXT_BASE + +/* + * These are defined in the board-specific linker script. 
+ */ +.globl _bss_start_ofs +_bss_start_ofs: + .word _bss - _start + +.global _image_copy_end_ofs +_image_copy_end_ofs: + .word __image_copy_end - _start + +.globl _bss_end_ofs +_bss_end_ofs: + .word _ebss - _start +# .word __bss_end__ - _start + +.globl _end_ofs +_end_ofs: + .word _end - _start + +#ifdef CONFIG_USE_IRQ +/* IRQ stack memory (calculated at run-time) */ +.globl IRQ_STACK_START +IRQ_STACK_START: + .word 0x0badc0de + +/* IRQ stack memory (calculated at run-time) */ +.globl FIQ_STACK_START +FIQ_STACK_START: + .word 0x0badc0de +#endif + +/* IRQ stack memory (calculated at run-time) + 8 bytes */ +.globl IRQ_STACK_START_IN +IRQ_STACK_START_IN: + .word 0x0badc0de + +/* + * the actual reset code + */ + +reset: +/* bl save_boot_params */ + /* + * set the cpu to SVC32 mode + */ + mrs r0, cpsr + bic r0, r0, #0x1f + orr r0, r0, #0xd3 + msr cpsr,r0 + +#if !defined(CONFIG_TEGRA2) +/* + * Setup vector: + * (OMAP4 spl TEXT_BASE is not 32 byte aligned. + * Continue to use ROM code vector only in OMAP4 spl) + */ +#if !(defined(CONFIG_OMAP44XX) && defined(CONFIG_SPL_BUILD)) + /* Set V=0 in CP15 SCTRL register - for VBAR to point to vector */ + mrc p15, 0, r0, c1, c0, 0 @ Read CP15 SCTRL Register + bic r0, #CR_V @ V = 0 + mcr p15, 0, r0, c1, c0, 0 @ Write CP15 SCTRL Register + + /* Set vector address in CP15 VBAR register */ + ldr r0, =_start + mcr p15, 0, r0, c12, c0, 0 @Set VBAR +#endif +#endif /* !Tegra2 */ + + /* the mask ROM code should have PLL and others stable */ +#ifndef CONFIG_SKIP_LOWLEVEL_INIT + /* + * FIXME(dhendrix): Do we need to explicitly disable icache/dcache + * first? See example 15-3 in Cortex-A programmers guide... + */ + bl cpu_init_cp15 + bl cpu_init_crit +#endif + +/* Set stackpointer in internal RAM to call board_init_f */ +call_board_init_f: + ldr sp, =(CONFIG_SYS_INIT_SP_ADDR) + bic sp, sp, #7 /* 8-byte alignment for ABI compliance */ + ldr r0,=0x00000000 + bl board_init_f + +/*------------------------------------------------------------------------------*/ + +/* + * void relocate_code (addr_sp, gd, addr_moni) + * + * This "function" does not return, instead it continues in RAM + * after relocating the monitor code. + * + */ + .globl relocate_code +relocate_code: + mov r4, r0 /* save addr_sp */ + mov r5, r1 /* save addr of gd */ + mov r6, r2 /* save addr of destination */ + + /* Set up the stack */ +stack_setup: + mov sp, r4 + + adr r0, _start + subs r9, r6, r0 /* r9 <- relocation offset */ + beq clear_bss /* skip relocation */ + mov r1, r6 /* r1 <- scratch for copy_loop */ + ldr r3, _image_copy_end_ofs + add r2, r0, r3 /* r2 <- source end address */ + +copy_loop: + ldmia r0!, {r9-r10} /* copy from source address [r0] */ + stmia r1!, {r9-r10} /* copy to target address [r1] */ + cmp r0, r2 /* until source end address [r2] */ + blo copy_loop + +#ifndef CONFIG_SPL_BUILD + /* + * fix .rel.dyn relocations + */ + ldr r0, _TEXT_BASE /* r0 <- Text base */ + sub r9, r6, r0 /* r9 <- relocation offset */ + ldr r10, _dynsym_start_ofs /* r10 <- sym table ofs */ + add r10, r10, r0 /* r10 <- sym table in FLASH */ + ldr r2, _rel_dyn_start_ofs /* r2 <- rel dyn start ofs */ + add r2, r2, r0 /* r2 <- rel dyn start in FLASH */ + ldr r3, _rel_dyn_end_ofs /* r3 <- rel dyn end ofs */ + add r3, r3, r0 /* r3 <- rel dyn end in FLASH */ +fixloop: + ldr r0, [r2] /* r0 <- location to fix up, IN FLASH! */ + add r0, r0, r9 /* r0 <- location to fix up in RAM */ + ldr r1, [r2, #4] + and r7, r1, #0xff + cmp r7, #23 /* relative fixup? */ + beq fixrel + cmp r7, #2 /* absolute fixup? 
*/ + beq fixabs + /* ignore unknown type of fixup */ + b fixnext +fixabs: + /* absolute fix: set location to (offset) symbol value */ + mov r1, r1, LSR #4 /* r1 <- symbol index in .dynsym */ + add r1, r10, r1 /* r1 <- address of symbol in table */ + ldr r1, [r1, #4] /* r1 <- symbol value */ + add r1, r1, r9 /* r1 <- relocated sym addr */ + b fixnext +fixrel: + /* relative fix: increase location by offset */ + ldr r1, [r0] + add r1, r1, r9 +fixnext: + str r1, [r0] + add r2, r2, #8 /* each rel.dyn entry is 8 bytes */ + cmp r2, r3 + blo fixloop + b clear_bss +_rel_dyn_start_ofs: + .word __rel_dyn_start - _start +_rel_dyn_end_ofs: + .word __rel_dyn_end - _start +_dynsym_start_ofs: + .word __dynsym_start - _start + +#endif /* #ifndef CONFIG_SPL_BUILD */ + +clear_bss: +#ifdef CONFIG_SPL_BUILD + /* No relocation for SPL */ + ldr r0, =_bss + ldr r1, =_ebss +#else + ldr r0, _bss_start_ofs + ldr r1, _bss_end_ofs + mov r4, r6 /* reloc addr */ + add r0, r0, r4 + add r1, r1, r4 +#endif + mov r2, #0x00000000 /* clear */ + +clbss_l:str r2, [r0] /* clear loop... */ + add r0, r0, #4 + cmp r0, r1 + bne clbss_l + +/* + * We are done. Do not return, instead branch to second part of board + * initialization, now running from RAM. + */ +jump_2_ram: +/* + * If I-cache is enabled invalidate it + */ +#ifndef CONFIG_SYS_ICACHE_OFF + mcr p15, 0, r0, c7, c5, 0 @ invalidate icache + mcr p15, 0, r0, c7, c10, 4 @ DSB + mcr p15, 0, r0, c7, c5, 4 @ ISB +#endif + ldr r0, _board_init_r_ofs + adr r1, _start + add lr, r0, r1 + add lr, lr, r9 + /* setup parameters for board_init_r */ + mov r0, r5 /* gd_t */ + mov r1, r6 /* dest_addr */ + /* jump to it ... */ + mov pc, lr + +_board_init_r_ofs: + .word board_init_r - _start + +/************************************************************************* + * + * cpu_init_cp15 + * + * Setup CP15 registers (cache, MMU, TLBs). The I-cache is turned on unless + * CONFIG_SYS_ICACHE_OFF is defined. + * + *************************************************************************/ +.globl cpu_init_cp15 +cpu_init_cp15: + /* + * Invalidate L1 I/D + */ + mov r0, #0 @ set up for MCR + mcr p15, 0, r0, c8, c7, 0 @ invalidate TLBs + mcr p15, 0, r0, c7, c5, 0 @ invalidate icache + mcr p15, 0, r0, c7, c5, 6 @ invalidate BP array + mcr p15, 0, r0, c7, c10, 4 @ DSB + mcr p15, 0, r0, c7, c5, 4 @ ISB + + /* + * disable MMU stuff and caches + */ + mrc p15, 0, r0, c1, c0, 0 + bic r0, r0, #0x00002000 @ clear bits 13 (--V-) + bic r0, r0, #0x00000007 @ clear bits 2:0 (-CAM) + orr r0, r0, #0x00000002 @ set bit 1 (--A-) Align + orr r0, r0, #0x00000800 @ set bit 11 (Z---) BTB +#ifdef CONFIG_SYS_ICACHE_OFF + bic r0, r0, #0x00001000 @ clear bit 12 (I) I-cache +#else + orr r0, r0, #0x00001000 @ set bit 12 (I) I-cache +#endif + mcr p15, 0, r0, c1, c0, 0 + mov pc, lr @ back to my caller + + +#ifndef CONFIG_SKIP_LOWLEVEL_INIT +/************************************************************************* + * + * CPU_init_critical registers + * + * setup important registers + * setup memory timing + * + *************************************************************************/ +cpu_init_crit: + /* + * Jump to board specific initialization... + * The Mask ROM will have already initialized + * basic memory. Go here to bump up clock rate and handle + * wake up conditions. 
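The fixup loop above walks the .rel.dyn table and patches the two relocation types it knows about: type 23 is R_ARM_RELATIVE and type 2 is R_ARM_ABS32. A compact C rendering of those two cases (an illustrative sketch, not part of the patch; type and function names are invented, and the assembly folds the 16-byte symbol-entry scaling into a single shift):

#include <stdint.h>

typedef struct { uint32_t r_offset; uint32_t r_info; } elf32_rel_t;
typedef struct {
	uint32_t st_name, st_value, st_size;
	uint8_t st_info, st_other;
	uint16_t st_shndx;
} elf32_sym_t;				/* 16 bytes, as the assembly assumes */

/* 'off' is the relocation offset: RAM destination minus link address */
static void fix_rel_dyn(const elf32_rel_t *rel, const elf32_rel_t *end,
			const elf32_sym_t *dynsym, uint32_t off)
{
	for (; rel < end; rel++) {
		uint32_t *loc = (uint32_t *)(uintptr_t)(rel->r_offset + off);
		uint8_t type = rel->r_info & 0xff;

		if (type == 23)		/* R_ARM_RELATIVE: add the offset */
			*loc += off;
		else if (type == 2)	/* R_ARM_ABS32: symbol value + offset */
			*loc = dynsym[rel->r_info >> 8].st_value + off;
		/* other relocation types are not handled here */
	}
}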
+ */ + mov ip, lr @ persevere link reg across call + bl lowlevel_init @ go setup pll,mux,memory + mov lr, ip @ restore link + mov pc, lr @ back to my caller +#endif + +#ifndef CONFIG_SPL_BUILD +/* + ************************************************************************* + * + * Interrupt handling + * + ************************************************************************* + */ +@ +@ IRQ stack frame. +@ +#define S_FRAME_SIZE 72 + +#define S_OLD_R0 68 +#define S_PSR 64 +#define S_PC 60 +#define S_LR 56 +#define S_SP 52 + +#define S_IP 48 +#define S_FP 44 +#define S_R10 40 +#define S_R9 36 +#define S_R8 32 +#define S_R7 28 +#define S_R6 24 +#define S_R5 20 +#define S_R4 16 +#define S_R3 12 +#define S_R2 8 +#define S_R1 4 +#define S_R0 0 + +#define MODE_SVC 0x13 +#define I_BIT 0x80 + +/* + * use bad_save_user_regs for abort/prefetch/undef/swi ... + * use irq_save_user_regs / irq_restore_user_regs for IRQ/FIQ handling + */ + + .macro bad_save_user_regs + sub sp, sp, #S_FRAME_SIZE @ carve out a frame on current + @ user stack + stmia sp, {r0 - r12} @ Save user registers (now in + @ svc mode) r0-r12 + ldr r2, IRQ_STACK_START_IN @ set base 2 words into abort + @ stack + ldmia r2, {r2 - r3} @ get values for "aborted" pc + @ and cpsr (into parm regs) + add r0, sp, #S_FRAME_SIZE @ grab pointer to old stack + + add r5, sp, #S_SP + mov r1, lr + stmia r5, {r0 - r3} @ save sp_SVC, lr_SVC, pc, cpsr + mov r0, sp @ save current stack into r0 + @ (param register) + .endm + + .macro irq_save_user_regs + sub sp, sp, #S_FRAME_SIZE + stmia sp, {r0 - r12} @ Calling r0-r12 + add r8, sp, #S_PC @ !! R8 NEEDS to be saved !! + @ a reserved stack spot would + @ be good. + stmdb r8, {sp, lr}^ @ Calling SP, LR + str lr, [r8, #0] @ Save calling PC + mrs r6, spsr + str r6, [r8, #4] @ Save CPSR + str r0, [r8, #8] @ Save OLD_R0 + mov r0, sp + .endm + + .macro irq_restore_user_regs + ldmia sp, {r0 - lr}^ @ Calling r0 - lr + mov r0, r0 + ldr lr, [sp, #S_PC] @ Get PC + add sp, sp, #S_FRAME_SIZE + subs pc, lr, #4 @ return & move spsr_svc into + @ cpsr + .endm + + .macro get_bad_stack + ldr r13, IRQ_STACK_START_IN @ setup our mode stack (enter + @ in banked mode) + + str lr, [r13] @ save caller lr in position 0 + @ of saved stack + mrs lr, spsr @ get the spsr + str lr, [r13, #4] @ save spsr in position 1 of + @ saved stack + + mov r13, #MODE_SVC @ prepare SVC-Mode + @ msr spsr_c, r13 + msr spsr, r13 @ switch modes, make sure + @ moves will execute + mov lr, pc @ capture return pc + movs pc, lr @ jump to next instruction & + @ switch modes. + .endm + + .macro get_bad_stack_swi + sub r13, r13, #4 @ space on current stack for + @ scratch reg. + str r0, [r13] @ save R0's value. 
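The S_* offsets above describe a 72-byte frame that the save macros build on the stack before calling the C handlers in lib/interrupts.c. Purely as an illustration (field names invented; the real handlers take a struct pt_regs from ptrace.h), the layout corresponds to:

struct exc_frame {			/* byte offset */
	unsigned long r0, r1, r2, r3;	/*  0 .. 12 */
	unsigned long r4, r5, r6, r7;	/* 16 .. 28 */
	unsigned long r8, r9, r10;	/* 32 .. 40 */
	unsigned long fp;		/* 44 = S_FP  */
	unsigned long ip;		/* 48 = S_IP  */
	unsigned long sp;		/* 52 = S_SP  */
	unsigned long lr;		/* 56 = S_LR  */
	unsigned long pc;		/* 60 = S_PC  */
	unsigned long psr;		/* 64 = S_PSR */
	unsigned long old_r0;		/* 68 = S_OLD_R0, 72 bytes total */
};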
+ ldr r0, IRQ_STACK_START_IN @ get data regions start + @ spots for abort stack + str lr, [r0] @ save caller lr in position 0 + @ of saved stack + mrs r0, spsr @ get the spsr + str lr, [r0, #4] @ save spsr in position 1 of + @ saved stack + ldr r0, [r13] @ restore r0 + add r13, r13, #4 @ pop stack entry + .endm + + .macro get_irq_stack @ setup IRQ stack + ldr sp, IRQ_STACK_START + .endm + + .macro get_fiq_stack @ setup FIQ stack + ldr sp, FIQ_STACK_START + .endm + +/* + * exception handlers + */ + .align 5 +undefined_instruction: + get_bad_stack + bad_save_user_regs + bl do_undefined_instruction + + .align 5 +software_interrupt: + get_bad_stack_swi + bad_save_user_regs + bl do_software_interrupt + + .align 5 +prefetch_abort: + get_bad_stack + bad_save_user_regs + bl do_prefetch_abort + + .align 5 +data_abort: + get_bad_stack + bad_save_user_regs + bl do_data_abort + + .align 5 +not_used: + get_bad_stack + bad_save_user_regs + bl do_not_used + +#ifdef CONFIG_USE_IRQ + + .align 5 +irq: + get_irq_stack + irq_save_user_regs + bl do_irq + irq_restore_user_regs + + .align 5 +fiq: + get_fiq_stack + /* someone ought to write a more effective fiq_save_user_regs */ + irq_save_user_regs + bl do_fiq + irq_restore_user_regs + +#else + + .align 5 +irq: + get_bad_stack + bad_save_user_regs + bl do_irq + + .align 5 +fiq: + get_bad_stack + bad_save_user_regs + bl do_fiq + +#endif /* CONFIG_USE_IRQ */ +#endif /* CONFIG_SPL_BUILD */ -- cgit v1.2.3
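As a final aside (illustrative, not part of the patch): the mode and mask constants used in start.S and lib/interrupts.c come directly from the ARM CPSR layout. A small C sketch of that convention, with the inline assembly mirroring what enable_interrupts()/disable_interrupts() read:

#define CPSR_IRQ_MASK	0x80	/* I bit: set = IRQs masked (I_BIT above) */
#define CPSR_FIQ_MASK	0x40	/* F bit: set = FIQs masked */
#define CPSR_MODE_MASK	0x1f
#define CPSR_MODE_SVC	0x13	/* MODE_SVC; reset ORs in 0xd3 = SVC | I | F */

static inline unsigned long read_cpsr(void)
{
	unsigned long cpsr;
	__asm__ __volatile__("mrs %0, cpsr" : "=r" (cpsr));
	return cpsr;
}

/* nonzero when IRQs are currently enabled, i.e. the I bit is clear */
static inline int irqs_enabled(void)
{
	return (read_cpsr() & CPSR_IRQ_MASK) == 0;
}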