Diffstat (limited to 'src/soc/nvidia')
-rw-r--r--  src/soc/nvidia/tegra132/Makefile.inc        15
-rw-r--r--  src/soc/nvidia/tegra132/bootblock.c          2
-rw-r--r--  src/soc/nvidia/tegra132/dma.c              149
-rw-r--r--  src/soc/nvidia/tegra132/dma.h              190
-rw-r--r--  src/soc/nvidia/tegra132/gpio.h              70
-rw-r--r--  src/soc/nvidia/tegra132/i2c.c               55
-rw-r--r--  src/soc/nvidia/tegra132/monotonic_timer.c   27
-rw-r--r--  src/soc/nvidia/tegra132/spi.c              936
-rw-r--r--  src/soc/nvidia/tegra132/spi.h               73
9 files changed, 1517 insertions, 0 deletions
diff --git a/src/soc/nvidia/tegra132/Makefile.inc b/src/soc/nvidia/tegra132/Makefile.inc
index af33e2d1dc..abcdfd599e 100644
--- a/src/soc/nvidia/tegra132/Makefile.inc
+++ b/src/soc/nvidia/tegra132/Makefile.inc
@@ -3,7 +3,12 @@ bootblock-y += bootblock_asm.S
bootblock-y += cbfs.c
bootblock-y += timer.c
bootblock-y += clock.c
+bootblock-y += spi.c
+bootblock-y += i2c.c
+bootblock-y += dma.c
+bootblock-y += monotonic_timer.c
bootblock-y += ../tegra/gpio.c
+bootblock-y += ../tegra/i2c.c
bootblock-y += ../tegra/pingroup.c
bootblock-y += ../tegra/pinmux.c
bootblock-y += ../tegra/apbmisc.c
@@ -15,7 +20,12 @@ romstage-y += cbfs.c
romstage-y += cbmem.c
romstage-y += timer.c
romstage-y += clock.c
+romstage-y += spi.c
+romstage-y += i2c.c
+romstage-y += dma.c
+romstage-y += monotonic_timer.c
romstage-y += ../tegra/gpio.c
+romstage-y += ../tegra/i2c.c
romstage-y += ../tegra/pinmux.c
romstage-$(CONFIG_DRIVERS_UART) += uart.c
@@ -23,7 +33,12 @@ ramstage-y += cbfs.c
ramstage-y += cbmem.c
ramstage-y += timer.c
ramstage-y += clock.c
+ramstage-y += spi.c
+ramstage-y += i2c.c
+ramstage-y += dma.c
+ramstage-y += monotonic_timer.c
ramstage-y += ../tegra/gpio.c
+ramstage-y += ../tegra/i2c.c
ramstage-y += ../tegra/pinmux.c
ramstage-$(CONFIG_DRIVERS_UART) += uart.c
diff --git a/src/soc/nvidia/tegra132/bootblock.c b/src/soc/nvidia/tegra132/bootblock.c
index 3544c65c08..f377bc1ee0 100644
--- a/src/soc/nvidia/tegra132/bootblock.c
+++ b/src/soc/nvidia/tegra132/bootblock.c
@@ -59,5 +59,7 @@ void main(void)
clock_init();
+ bootblock_mainboard_init();
+
while(1);
}
diff --git a/src/soc/nvidia/tegra132/dma.c b/src/soc/nvidia/tegra132/dma.c
new file mode 100644
index 0000000000..3f236c3e22
--- /dev/null
+++ b/src/soc/nvidia/tegra132/dma.c
@@ -0,0 +1,149 @@
+/*
+ * (C) Copyright 2010,2011
+ * NVIDIA Corporation <www.nvidia.com>
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <console/console.h>
+
+#include <arch/io.h>
+#include <soc/addressmap.h>
+
+#include "dma.h"
+
+struct apb_dma * const apb_dma = (struct apb_dma *)TEGRA_APB_DMA_BASE;
+
+#define APB_DMA_OFFSET(n) \
+ (struct apb_dma_channel_regs *)(TEGRA_APB_DMA_BASE + n)
+struct apb_dma_channel apb_dma_channels[] = {
+ { .num = 0, .regs = APB_DMA_OFFSET(0x1000) },
+ { .num = 1, .regs = APB_DMA_OFFSET(0x1040) },
+ { .num = 2, .regs = APB_DMA_OFFSET(0x1080) },
+ { .num = 3, .regs = APB_DMA_OFFSET(0x10c0) },
+ { .num = 4, .regs = APB_DMA_OFFSET(0x1100) },
+ { .num = 5, .regs = APB_DMA_OFFSET(0x1140) },
+ { .num = 6, .regs = APB_DMA_OFFSET(0x1180) },
+ { .num = 7, .regs = APB_DMA_OFFSET(0x11c0) },
+ { .num = 8, .regs = APB_DMA_OFFSET(0x1200) },
+ { .num = 9, .regs = APB_DMA_OFFSET(0x1240) },
+ { .num = 10, .regs = APB_DMA_OFFSET(0x1280) },
+ { .num = 11, .regs = APB_DMA_OFFSET(0x12c0) },
+ { .num = 12, .regs = APB_DMA_OFFSET(0x1300) },
+ { .num = 13, .regs = APB_DMA_OFFSET(0x1340) },
+ { .num = 14, .regs = APB_DMA_OFFSET(0x1380) },
+ { .num = 15, .regs = APB_DMA_OFFSET(0x13c0) },
+ { .num = 16, .regs = APB_DMA_OFFSET(0x1400) },
+ { .num = 17, .regs = APB_DMA_OFFSET(0x1440) },
+ { .num = 18, .regs = APB_DMA_OFFSET(0x1480) },
+ { .num = 19, .regs = APB_DMA_OFFSET(0x14c0) },
+ { .num = 20, .regs = APB_DMA_OFFSET(0x1500) },
+ { .num = 21, .regs = APB_DMA_OFFSET(0x1540) },
+ { .num = 22, .regs = APB_DMA_OFFSET(0x1580) },
+ { .num = 23, .regs = APB_DMA_OFFSET(0x15c0) },
+ { .num = 24, .regs = APB_DMA_OFFSET(0x1600) },
+ { .num = 25, .regs = APB_DMA_OFFSET(0x1640) },
+ { .num = 26, .regs = APB_DMA_OFFSET(0x1680) },
+ { .num = 27, .regs = APB_DMA_OFFSET(0x16c0) },
+ { .num = 28, .regs = APB_DMA_OFFSET(0x1700) },
+ { .num = 29, .regs = APB_DMA_OFFSET(0x1740) },
+ { .num = 30, .regs = APB_DMA_OFFSET(0x1780) },
+ { .num = 31, .regs = APB_DMA_OFFSET(0x17c0) },
+};
+
+int dma_busy(struct apb_dma_channel * const channel)
+{
+ /*
+ * In continuous mode, the BSY_n bit in APB_DMA_STATUS and
+ * BSY in APBDMACHAN_CHANNEL_n_STA_0 will remain set as '1' so long
+ * as the channel is enabled. So for this function we'll use the
+ * DMA_ACTIVITY bit.
+ */
+ return read32(&channel->regs->sta) & APB_STA_DMA_ACTIVITY ? 1 : 0;
+}
+/* claim a DMA channel */
+struct apb_dma_channel * const dma_claim(void)
+{
+ int i;
+ struct apb_dma_channel_regs *regs = NULL;
+
+ /*
+ * Set global enable bit, otherwise register access to channel
+ * DMA registers will not be possible.
+ */
+ setbits_le32(&apb_dma->command, APB_COMMAND_GEN);
+
+ for (i = 0; i < ARRAY_SIZE(apb_dma_channels); i++) {
+ regs = apb_dma_channels[i].regs;
+
+ if (!apb_dma_channels[i].in_use) {
+ u32 status = read32(&regs->sta);
+ if (status & (1 << i)) {
+ /* FIXME: should this be fatal? */
+ printk(BIOS_DEBUG, "%s: DMA channel %d busy?\n",
+ __func__, i);
+ }
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(apb_dma_channels))
+ return NULL;
+
+ apb_dma_channels[i].in_use = 1;
+ return &apb_dma_channels[i];
+}
+
+/* release a DMA channel */
+void dma_release(struct apb_dma_channel * const channel)
+{
+ int i;
+
+ /* FIXME: make this "thread" friendly */
+ while (dma_busy(channel))
+ ;
+
+ channel->in_use = 0;
+
+ /* clear the global enable bit if no channels are in use */
+ for (i = 0; i < ARRAY_SIZE(apb_dma_channels); i++) {
+ if (apb_dma_channels[i].in_use)
+ return;
+ }
+
+ clrbits_le32(&apb_dma->command, APB_COMMAND_GEN);
+}
+
+int dma_start(struct apb_dma_channel * const channel)
+{
+ struct apb_dma_channel_regs *regs = channel->regs;
+
+ /* Set ENB bit for this channel */
+ setbits_le32(&regs->csr, APB_CSR_ENB);
+
+ return 0;
+}
+
+int dma_stop(struct apb_dma_channel * const channel)
+{
+ struct apb_dma_channel_regs *regs = channel->regs;
+
+ /* Clear ENB bit for this channel */
+ clrbits_le32(&regs->csr, APB_CSR_ENB);
+
+ return 0;
+}
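
The five functions above (dma_claim(), dma_start(), dma_busy(), dma_stop() and dma_release()) are the whole channel-management API exported by dma.h. A minimal usage sketch for a hypothetical caller, with the register programming elided (see the sketch after the dma.h hunk below):

#include "dma.h"

/* Sketch: run one pre-programmed transfer on any free channel. */
static int run_one_transfer(void)
{
	struct apb_dma_channel *channel = dma_claim();

	if (channel == NULL)
		return -1;		/* all 32 channels are in use */

	/* ...program channel->regs (apb_ptr, ahb_ptr, wcount, csr)... */

	dma_start(channel);		/* sets APB_CSR_ENB */
	while (dma_busy(channel))
		;			/* polls APB_STA_DMA_ACTIVITY */
	dma_stop(channel);

	dma_release(channel);		/* drops APB_COMMAND_GEN if last user */
	return 0;
}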
diff --git a/src/soc/nvidia/tegra132/dma.h b/src/soc/nvidia/tegra132/dma.h
new file mode 100644
index 0000000000..7b07cbc89b
--- /dev/null
+++ b/src/soc/nvidia/tegra132/dma.h
@@ -0,0 +1,190 @@
+/*
+ * (C) Copyright 2010,2011
+ * NVIDIA Corporation <www.nvidia.com>
+ * Copyright (C) 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVIDIA_TEGRA132_DMA_H__
+#define __NVIDIA_TEGRA132_DMA_H__
+
+#include <inttypes.h>
+#include <soc/addressmap.h>
+
+/*
+ * The DMA engine operates on 4 bytes at a time, so make sure any data
+ * passed via DMA is aligned to avoid underrun/overrun.
+ */
+#define TEGRA_DMA_ALIGN_BYTES 4
+
+/*
+ * Note: Many APB DMA controller registers are laid out such that each
+ * bit controls or represents the status for the corresponding channel.
+ * So we will not bother to list each individual bit in this case.
+ */
+#define APB_COMMAND_GEN (1 << 31)
+
+#define APB_CNTRL_REG_COUNT_VALUE_MASK 0xffff
+#define APB_CNTRL_REG_COUNT_VALUE_SHIFT 0
+
+struct apb_dma {
+ u32 command; /* 0x00 */
+ u32 status; /* 0x04 */
+ u32 rsvd1[2];
+ u32 cntrl_reg; /* 0x10 */
+ u32 irq_sta_cpu; /* 0x14 */
+ u32 irq_sta_cop; /* 0x18 */
+ u32 irq_mask; /* 0x1c */
+ u32 irq_mask_set; /* 0x20 */
+ u32 irq_mask_clr; /* 0x24 */
+ u32 trig_reg; /* 0x28 */
+ u32 channel_trig_reg; /* 0x2c */
+ u32 dma_status; /* 0x30 */
+ u32 channel_en_reg; /* 0x34 */
+ u32 security_reg; /* 0x38 */
+ u32 channel_swid; /* 0x3c */
+ u32 rsvd[1];
+ u32 chan_wt_reg0; /* 0x44 */
+ u32 chan_wt_reg1; /* 0x48 */
+ u32 chan_wt_reg2; /* 0x4c */
+ u32 chan_wr_reg3; /* 0x50 */
+ u32 channel_swid1; /* 0x54 */
+} __attribute__((packed));
+check_member(apb_dma, channel_swid1, 0x54);
+
+/*
+ * Naming in the doc included a superfluous _CHANNEL_n_ for
+ * each entry and was left out for the sake of conciseness.
+ */
+#define APB_CSR_ENB (1 << 31)
+#define APB_CSR_IE_EOC (1 << 30)
+#define APB_CSR_HOLD (1 << 29)
+#define APB_CSR_DIR (1 << 28)
+#define APB_CSR_ONCE (1 << 27)
+#define APB_CSR_FLOW (1 << 21)
+#define APB_CSR_REQ_SEL_MASK 0x1f
+#define APB_CSR_REQ_SEL_SHIFT 16
+
+enum apbdmachan_req_sel {
+ APBDMA_SLAVE_CNTR_REQ = 0,
+ APBDMA_SLAVE_APBIF_CH0 = 1,
+ APBDMA_SLAVE_APBIF_CH1 = 2,
+ APBDMA_SLAVE_APBIF_CH2 = 3,
+ APBDMA_SLAVE_APBIF_CH3 = 4,
+ APBDMA_SLAVE_HSI = 5,
+ APBDMA_SLAVE_APBIF_CH4 = 6,
+ APBDMA_SLAVE_APBIF_CH5 = 7,
+ APBDMA_SLAVE_UART_A = 8,
+ APBDMA_SLAVE_UART_B = 9,
+ APBDMA_SLAVE_UART_C = 10,
+ APBDMA_SLAVE_DTV = 11,
+ APBDMA_SLAVE_APBIF_CH6 = 12,
+ APBDMA_SLAVE_APBIF_CH7 = 13,
+ APBDMA_SLAVE_APBIF_CH8 = 14,
+ APBDMA_SLAVE_SL2B1 = 15,
+ APBDMA_SLAVE_SL2B2 = 16,
+ APBDMA_SLAVE_SL2B3 = 17,
+ APBDMA_SLAVE_SL2B4 = 18,
+ APBDMA_SLAVE_UART_D = 19,
+ APBDMA_SLAVE_UART_E = 20,
+ APBDMA_SLAVE_I2C = 21,
+ APBDMA_SLAVE_I2C2 = 22,
+ APBDMA_SLAVE_I2C3 = 23,
+ APBDMA_SLAVE_DVC_I2C = 24,
+ APBDMA_SLAVE_OWR = 25,
+ APBDMA_SLAVE_I2C4 = 26,
+ APBDMA_SLAVE_SL2B5 = 27,
+ APBDMA_SLAVE_SL2B6 = 28,
+ APBDMA_SLAVE_APBIF_CH9 = 29,
+ APBDMA_SLAVE_I2C6 = 30,
+ APBDMA_SLAVE_NA31 = 31,
+};
+
+#define APB_STA_BSY (1 << 31)
+#define APB_STA_ISE_EOC (1 << 30)
+#define APB_STA_HALT (1 << 29)
+#define APB_STA_PING_PONG_STA (1 << 28)
+#define APB_STA_DMA_ACTIVITY (1 << 27)
+#define APB_STA_CHANNEL_PAUSE (1 << 26)
+
+#define APB_CSRE_CHANNEL_PAUSE (1 << 31)
+#define APB_CSRE_TRIG_SEL_MASK 0x3f
+#define APB_CSRE_TRIG_SEL_SHIFT 14
+
+#define AHB_PTR_MASK (0x3fffffff)
+#define AHB_PTR_SHIFT 2
+
+#define AHB_SEQ_INTR_ENB (1 << 31)
+#define AHB_BUS_WIDTH_MASK 0x7
+#define AHB_BUS_WIDTH_SHIFT 28
+#define AHB_DATA_SWAP (1 << 27)
+#define AHB_BURST_MASK 0x7
+#define AHB_BURST_SHIFT 24
+#define AHB_SEQ_DBL_BUF (1 << 19)
+#define AHB_SEQ_WRAP_MASK 0x7
+#define AHB_SEQ_WRAP_SHIFT 16
+
+#define APB_PTR_MASK 0x3fffffff
+#define APB_PTR_SHIFT 2
+
+#define APB_BUS_WIDTH_MASK 0x7
+#define APB_BUS_WIDTH_SHIFT 28
+#define APB_DATA_SWAP (1 << 27)
+#define APB_ADDR_WRAP_MASK 0x7
+#define APB_ADDR_WRAP_SHIFT 16
+
+#define APB_WORD_TRANSFER_MASK 0x0fffffff
+#define APB_WORD_TRANSFER_SHIFT 2
+
+struct apb_dma_channel_regs {
+ u32 csr; /* 0x00 */
+ u32 sta; /* 0x04 */
+ u32 dma_byte_sta; /* 0x08 */
+ u32 csre; /* 0x0c */
+ u32 ahb_ptr; /* 0x10 */
+ u32 ahb_seq; /* 0x14 */
+ u32 apb_ptr; /* 0x18 */
+ u32 apb_seq; /* 0x1c */
+ u32 wcount; /* 0x20 */
+ u32 word_transfer; /* 0x24 */
+} __attribute__((packed));
+check_member(apb_dma_channel_regs, word_transfer, 0x24);
+
+struct apb_dma_channel {
+ const int num;
+ struct apb_dma_channel_regs *regs;
+
+ /*
+ * Basic high-level semaphore that can be used to "claim"
+ * a DMA channel e.g. by SPI, I2C, or other peripheral driver.
+ */
+ int in_use;
+};
+
+struct apb_dma_channel * const dma_claim(void);
+void dma_release(struct apb_dma_channel * const channel);
+int dma_start(struct apb_dma_channel * const channel);
+int dma_stop(struct apb_dma_channel * const channel);
+int dma_busy(struct apb_dma_channel * const channel);
+
+#endif /* __NVIDIA_TEGRA132_DMA_H__ */
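
Before dma_start() the caller must fill in the per-channel registers declared above. The sketch below condenses what setup_dma_params() and tegra_spi_dma_prepare() in spi.c (further down in this patch) do for a Tx transfer; the FIFO address, buffer and request-select value are placeholders, not a prescribed configuration:

#include <arch/io.h>
#include <stdint.h>
#include <stdlib.h>
#include "dma.h"

/* Sketch: point a claimed channel at an APB FIFO and a memory buffer. */
static void program_tx_channel(struct apb_dma_channel *dma, uint32_t fifo_addr,
			       const void *buf, unsigned int bytes)
{
	write32(fifo_addr, &dma->regs->apb_ptr);	/* peripheral FIFO */
	write32((uintptr_t)buf, &dma->regs->ahb_ptr);	/* memory buffer */

	/* WCOUNT ignores the low two bits and counts words from n-1 */
	write32(ALIGN_DOWN(bytes - TEGRA_DMA_ALIGN_BYTES,
			   TEGRA_DMA_ALIGN_BYTES), &dma->regs->wcount);

	/* memory-to-peripheral, one block per start, peripheral flow control */
	setbits_le32(&dma->regs->csr, APB_CSR_DIR | APB_CSR_ONCE |
		     APB_CSR_FLOW |
		     (APBDMA_SLAVE_SL2B1 << APB_CSR_REQ_SEL_SHIFT));
}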
diff --git a/src/soc/nvidia/tegra132/gpio.h b/src/soc/nvidia/tegra132/gpio.h
new file mode 100644
index 0000000000..5563d65c77
--- /dev/null
+++ b/src/soc/nvidia/tegra132/gpio.h
@@ -0,0 +1,70 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __SOC_NVIDIA_TEGRA132_GPIO_H__
+#define __SOC_NVIDIA_TEGRA132_GPIO_H__
+
+#include <soc/nvidia/tegra/gpio.h>
+#include <stdint.h>
+
+#include "pinmux.h" /* for pinmux constants in GPIO macro */
+
+/* GPIO index constants. */
+
+#define GPIO_PORT_CONSTANTS(port) \
+ GPIO_##port##0_INDEX, GPIO_##port##1_INDEX, GPIO_##port##2_INDEX, \
+ GPIO_##port##3_INDEX, GPIO_##port##4_INDEX, GPIO_##port##5_INDEX, \
+ GPIO_##port##6_INDEX, GPIO_##port##7_INDEX
+
+enum {
+ GPIO_PORT_CONSTANTS(A),
+ GPIO_PORT_CONSTANTS(B),
+ GPIO_PORT_CONSTANTS(C),
+ GPIO_PORT_CONSTANTS(D),
+ GPIO_PORT_CONSTANTS(E),
+ GPIO_PORT_CONSTANTS(F),
+ GPIO_PORT_CONSTANTS(G),
+ GPIO_PORT_CONSTANTS(H),
+ GPIO_PORT_CONSTANTS(I),
+ GPIO_PORT_CONSTANTS(J),
+ GPIO_PORT_CONSTANTS(K),
+ GPIO_PORT_CONSTANTS(L),
+ GPIO_PORT_CONSTANTS(M),
+ GPIO_PORT_CONSTANTS(N),
+ GPIO_PORT_CONSTANTS(O),
+ GPIO_PORT_CONSTANTS(P),
+ GPIO_PORT_CONSTANTS(Q),
+ GPIO_PORT_CONSTANTS(R),
+ GPIO_PORT_CONSTANTS(S),
+ GPIO_PORT_CONSTANTS(T),
+ GPIO_PORT_CONSTANTS(U),
+ GPIO_PORT_CONSTANTS(V),
+ GPIO_PORT_CONSTANTS(W),
+ GPIO_PORT_CONSTANTS(X),
+ GPIO_PORT_CONSTANTS(Y),
+ GPIO_PORT_CONSTANTS(Z),
+ GPIO_PORT_CONSTANTS(AA),
+ GPIO_PORT_CONSTANTS(BB),
+ GPIO_PORT_CONSTANTS(CC),
+ GPIO_PORT_CONSTANTS(DD),
+ GPIO_PORT_CONSTANTS(EE),
+ GPIO_PORT_CONSTANTS(FF)
+};
+
+#endif /* __SOC_NVIDIA_TEGRA132_GPIO_H__ */
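
The enum above simply hands out eight consecutive indices per port (A0..A7 are 0..7, B0..B7 are 8..15, and so on through port FF). Two illustrative helpers, not part of this patch, showing how a pad's port and bit fall out of its index:

/* Hypothetical helpers: decompose a GPIO_xn_INDEX value. */
static inline unsigned int gpio_index_to_port(unsigned int index)
{
	return index / 8;	/* 0 = port A, 1 = port B, ..., 31 = port FF */
}

static inline unsigned int gpio_index_to_bit(unsigned int index)
{
	return index % 8;	/* pin number within the port */
}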
diff --git a/src/soc/nvidia/tegra132/i2c.c b/src/soc/nvidia/tegra132/i2c.c
new file mode 100644
index 0000000000..eada743914
--- /dev/null
+++ b/src/soc/nvidia/tegra132/i2c.c
@@ -0,0 +1,55 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <soc/addressmap.h>
+#include <soc/clock.h>
+#include <soc/nvidia/tegra/i2c.h>
+
+struct tegra_i2c_bus_info tegra_i2c_info[] = {
+ {
+ .base = (void *)TEGRA_I2C_BASE,
+ .reset_bit = CLK_L_I2C1,
+ .reset_func = &clock_reset_l
+ },
+ {
+ .base = (void *)TEGRA_I2C2_BASE,
+ .reset_bit = CLK_H_I2C2,
+ .reset_func = &clock_reset_h
+ },
+ {
+ .base = (void *)TEGRA_I2C3_BASE,
+ .reset_bit = CLK_U_I2C3,
+ .reset_func = &clock_reset_u
+ },
+ {
+ .base = (void *)TEGRA_I2C4_BASE,
+ .reset_bit = CLK_V_I2C4,
+ .reset_func = &clock_reset_v
+ },
+ {
+ .base = (void *)TEGRA_I2C5_BASE,
+ .reset_bit = CLK_H_I2C5,
+ .reset_func = &clock_reset_h
+ },
+ {
+ .base = (void *)TEGRA_I2C6_BASE,
+ .reset_bit = CLK_X_I2C6,
+ .reset_func = &clock_reset_x
+ }
+};
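
This table is consumed by the shared Tegra I2C driver (../tegra/i2c.c, which the Makefile changes above now build); the reset_bit/reset_func pair lets it pulse the correct clock-reset line when a controller wedges. A hedged sketch of that lookup, assuming the common header exposes tegra_i2c_info and that the clock_reset_* functions take the reset bit mask as their only argument:

#include <soc/clock.h>
#include <soc/nvidia/tegra/i2c.h>

/* Sketch: reset the controller behind 0-based I2C bus number 'bus'. */
static void i2c_bus_reset(unsigned int bus)
{
	struct tegra_i2c_bus_info *info = &tegra_i2c_info[bus];

	/* e.g. bus 0 resolves to clock_reset_l(CLK_L_I2C1) */
	info->reset_func(info->reset_bit);
}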
diff --git a/src/soc/nvidia/tegra132/monotonic_timer.c b/src/soc/nvidia/tegra132/monotonic_timer.c
new file mode 100644
index 0000000000..d6c30b4ffa
--- /dev/null
+++ b/src/soc/nvidia/tegra132/monotonic_timer.c
@@ -0,0 +1,27 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/io.h>
+#include <soc/addressmap.h>
+#include <timer.h>
+
+void timer_monotonic_get(struct mono_time *mt)
+{
+ mono_time_set_usecs(mt, read32((void *)TEGRA_TMRUS_BASE));
+}
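
TMRUS is a free-running 1 MHz counter, so the monotonic timer amounts to a single register read. The SPI driver later in this patch uses it for its FIFO timeout; the same pattern, condensed into a generic poll-with-timeout helper (the predicate is a placeholder):

#include <timer.h>

/* Sketch: poll cond() until it returns non-zero or timeout_us elapses. */
static int wait_for_cond_us(int (*cond)(void), long timeout_us)
{
	struct mono_time start;
	struct rela_time rt;

	timer_monotonic_get(&start);
	do {
		if (cond())
			return 1;
		rt = current_time_from(&start);
	} while (rela_time_in_microseconds(&rt) < timeout_us);

	return 0;	/* timed out */
}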
diff --git a/src/soc/nvidia/tegra132/spi.c b/src/soc/nvidia/tegra132/spi.c
new file mode 100644
index 0000000000..f1be207ced
--- /dev/null
+++ b/src/soc/nvidia/tegra132/spi.c
@@ -0,0 +1,936 @@
+/*
+ * NVIDIA Tegra SPI controller (T114 and later)
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation
+ * Copyright (C) 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <assert.h>
+#include <cbfs.h>
+#include <cbfs_core.h>
+#include <inttypes.h>
+#include <spi-generic.h>
+#include <spi_flash.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <timer.h>
+#include <arch/cache.h>
+#include <arch/io.h>
+#include <console/console.h>
+#include <soc/addressmap.h>
+#include <delay.h>
+
+#include "dma.h"
+#include "spi.h"
+
+#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
+# define DEBUG_SPI(x,...) printk(BIOS_DEBUG, "TEGRA_SPI: " x)
+#else
+# define DEBUG_SPI(x,...)
+#endif
+
+/*
+ * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
+ * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
+ */
+#define SPI_PACKET_SIZE_BYTES 1
+#define SPI_MAX_TRANSFER_BYTES_FIFO (64 * SPI_PACKET_SIZE_BYTES)
+#define SPI_MAX_TRANSFER_BYTES_DMA (65535 * SPI_PACKET_SIZE_BYTES)
+
+/*
+ * This is used to workaround an issue seen where it may take some time for
+ * packets to show up in the FIFO after they have been received and the
+ * BLOCK_COUNT has been incremented.
+ */
+#define SPI_FIFO_XFER_TIMEOUT_US 1000
+
+/* COMMAND1 */
+#define SPI_CMD1_GO (1 << 31)
+#define SPI_CMD1_M_S (1 << 30)
+#define SPI_CMD1_MODE_MASK 0x3
+#define SPI_CMD1_MODE_SHIFT 28
+#define SPI_CMD1_CS_SEL_MASK 0x3
+#define SPI_CMD1_CS_SEL_SHIFT 26
+#define SPI_CMD1_CS_POL_INACTIVE3 (1 << 25)
+#define SPI_CMD1_CS_POL_INACTIVE2 (1 << 24)
+#define SPI_CMD1_CS_POL_INACTIVE1 (1 << 23)
+#define SPI_CMD1_CS_POL_INACTIVE0 (1 << 22)
+#define SPI_CMD1_CS_SW_HW (1 << 21)
+#define SPI_CMD1_CS_SW_VAL (1 << 20)
+#define SPI_CMD1_IDLE_SDA_MASK 0x3
+#define SPI_CMD1_IDLE_SDA_SHIFT 18
+#define SPI_CMD1_BIDIR (1 << 17)
+#define SPI_CMD1_LSBI_FE (1 << 16)
+#define SPI_CMD1_LSBY_FE (1 << 15)
+#define SPI_CMD1_BOTH_EN_BIT (1 << 14)
+#define SPI_CMD1_BOTH_EN_BYTE (1 << 13)
+#define SPI_CMD1_RX_EN (1 << 12)
+#define SPI_CMD1_TX_EN (1 << 11)
+#define SPI_CMD1_PACKED (1 << 5)
+#define SPI_CMD1_BIT_LEN_MASK 0x1f
+#define SPI_CMD1_BIT_LEN_SHIFT 0
+
+/* COMMAND2 */
+#define SPI_CMD2_TX_CLK_TAP_DELAY (1 << 6)
+#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK (0x3F << 6)
+#define SPI_CMD2_RX_CLK_TAP_DELAY (1 << 0)
+#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK (0x3F << 0)
+
+/* SPI_TRANS_STATUS */
+#define SPI_STATUS_RDY (1 << 30)
+#define SPI_STATUS_SLV_IDLE_COUNT_MASK 0xff
+#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT 16
+#define SPI_STATUS_BLOCK_COUNT 0xffff
+#define SPI_STATUS_BLOCK_COUNT_SHIFT 0
+
+/* SPI_FIFO_STATUS */
+#define SPI_FIFO_STATUS_CS_INACTIVE (1 << 31)
+#define SPI_FIFO_STATUS_FRAME_END (1 << 30)
+#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK 0x7f
+#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT 23
+#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK 0x7f
+#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT 16
+#define SPI_FIFO_STATUS_RX_FIFO_FLUSH (1 << 15)
+#define SPI_FIFO_STATUS_TX_FIFO_FLUSH (1 << 14)
+#define SPI_FIFO_STATUS_ERR (1 << 8)
+#define SPI_FIFO_STATUS_TX_FIFO_OVF (1 << 7)
+#define SPI_FIFO_STATUS_TX_FIFO_UNR (1 << 6)
+#define SPI_FIFO_STATUS_RX_FIFO_OVF (1 << 5)
+#define SPI_FIFO_STATUS_RX_FIFO_UNR (1 << 4)
+#define SPI_FIFO_STATUS_TX_FIFO_FULL (1 << 3)
+#define SPI_FIFO_STATUS_TX_FIFO_EMPTY (1 << 2)
+#define SPI_FIFO_STATUS_RX_FIFO_FULL (1 << 1)
+#define SPI_FIFO_STATUS_RX_FIFO_EMPTY (1 << 0)
+
+/* SPI_DMA_CTL */
+#define SPI_DMA_CTL_DMA (1 << 31)
+#define SPI_DMA_CTL_CONT (1 << 30)
+#define SPI_DMA_CTL_IE_RX (1 << 29)
+#define SPI_DMA_CTL_IE_TX (1 << 28)
+#define SPI_DMA_CTL_RX_TRIG_MASK 0x3
+#define SPI_DMA_CTL_RX_TRIG_SHIFT 19
+#define SPI_DMA_CTL_TX_TRIG_MASK 0x3
+#define SPI_DMA_CTL_TX_TRIG_SHIFT 15
+
+/* SPI_DMA_BLK */
+#define SPI_DMA_CTL_BLOCK_SIZE_MASK 0xffff
+#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT 0
+
+static struct tegra_spi_channel tegra_spi_channels[] = {
+ /*
+ * Note: Tegra pinmux must be setup for corresponding SPI channel in
+ * order for its registers to be accessible. If pinmux has not been
+ * set up, access to the channel's registers will simply hang.
+ *
+ * TODO(dhendrix): Clarify or remove this comment (is clock setup
+ * necessary first, or just pinmux, or both?)
+ */
+ {
+ .slave = { .bus = 1, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B1,
+ },
+ {
+ .slave = { .bus = 2, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B2,
+ },
+ {
+ .slave = { .bus = 3, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B3,
+ },
+ {
+ .slave = { .bus = 4, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B4,
+ },
+ {
+ .slave = { .bus = 5, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B5,
+ },
+ {
+ .slave = { .bus = 6, },
+ .regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
+ .req_sel = APBDMA_SLAVE_SL2B6,
+ },
+};
+
+enum spi_direction {
+ SPI_SEND,
+ SPI_RECEIVE,
+};
+
+struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
+{
+ int i;
+ struct tegra_spi_channel *spi = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
+ if (tegra_spi_channels[i].slave.bus == bus) {
+ spi = &tegra_spi_channels[i];
+ break;
+ }
+ }
+ if (!spi)
+ return NULL;
+
+ /* software drives chip-select, set value to high */
+ setbits_le32(&spi->regs->command1,
+ SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);
+
+ /* 8-bit transfers, unpacked mode, most significant bit first */
+ clrbits_le32(&spi->regs->command1,
+ SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED);
+ setbits_le32(&spi->regs->command1, 7 << SPI_CMD1_BIT_LEN_SHIFT);
+
+ return spi;
+}
+
+static struct tegra_spi_channel * const to_tegra_spi(int bus) {
+ return &tegra_spi_channels[bus - 1];
+}
+
+static unsigned int tegra_spi_speed(unsigned int bus)
+{
+ /* FIXME: implement this properly, for now use max value (50MHz) */
+ return 50000000;
+}
+
+int spi_claim_bus(struct spi_slave *slave)
+{
+ struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
+ u32 val;
+
+ tegra_spi_init(slave->bus);
+
+ val = read32(&regs->command1);
+
+ /* select appropriate chip-select line */
+ val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
+ val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);
+
+ /* drive chip-select with the inverse of the "inactive" value */
+ if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
+ val &= ~SPI_CMD1_CS_SW_VAL;
+ else
+ val |= SPI_CMD1_CS_SW_VAL;
+
+ write32(val, &regs->command1);
+ return 0;
+}
+
+void spi_release_bus(struct spi_slave *slave)
+{
+ struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
+ u32 val;
+
+ val = read32(&regs->command1);
+
+ if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
+ val |= SPI_CMD1_CS_SW_VAL;
+ else
+ val &= ~SPI_CMD1_CS_SW_VAL;
+
+ write32(val, &regs->command1);
+}
+
+static void dump_fifo_status(struct tegra_spi_channel *spi)
+{
+ u32 status = read32(&spi->regs->fifo_status);
+
+ printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
+ if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
+ printk(BIOS_INFO, "\tTx overflow detected\n");
+ if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
+ printk(BIOS_INFO, "\tTx underrun detected\n");
+ if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
+ printk(BIOS_INFO, "\tRx overflow detected\n");
+ if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
+ printk(BIOS_INFO, "\tRx underrun detected\n");
+
+ printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
+ read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
+ printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
+ read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
+}
+
+static void clear_fifo_status(struct tegra_spi_channel *spi)
+{
+ clrbits_le32(&spi->regs->fifo_status,
+ SPI_FIFO_STATUS_ERR |
+ SPI_FIFO_STATUS_TX_FIFO_OVF |
+ SPI_FIFO_STATUS_TX_FIFO_UNR |
+ SPI_FIFO_STATUS_RX_FIFO_OVF |
+ SPI_FIFO_STATUS_RX_FIFO_UNR);
+}
+
+static void dump_spi_regs(struct tegra_spi_channel *spi)
+{
+ printk(BIOS_INFO, "SPI regs:\n"
+ "\tdma_blk: 0x%08x\n"
+ "\tcommand1: 0x%08x\n"
+ "\tdma_ctl: 0x%08x\n"
+ "\ttrans_status: 0x%08x\n",
+ read32(&spi->regs->dma_blk),
+ read32(&spi->regs->command1),
+ read32(&spi->regs->dma_ctl),
+ read32(&spi->regs->trans_status));
+}
+
+static void dump_dma_regs(struct apb_dma_channel *dma)
+{
+ printk(BIOS_INFO, "DMA regs:\n"
+ "\tahb_ptr: 0x%08x\n"
+ "\tapb_ptr: 0x%08x\n"
+ "\tahb_seq: 0x%08x\n"
+ "\tapb_seq: 0x%08x\n"
+ "\tcsr: 0x%08x\n"
+ "\tcsre: 0x%08x\n"
+ "\twcount: 0x%08x\n"
+ "\tdma_byte_sta: 0x%08x\n"
+ "\tword_transfer: 0x%08x\n",
+ read32(&dma->regs->ahb_ptr),
+ read32(&dma->regs->apb_ptr),
+ read32(&dma->regs->ahb_seq),
+ read32(&dma->regs->apb_seq),
+ read32(&dma->regs->csr),
+ read32(&dma->regs->csre),
+ read32(&dma->regs->wcount),
+ read32(&dma->regs->dma_byte_sta),
+ read32(&dma->regs->word_transfer));
+}
+
+static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
+{
+ /* FIXME: Make this take total packet size into account */
+ return read32(&spi->regs->trans_status) &
+ (SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
+}
+
+/*
+ * This calls udelay() with a calculated value based on the SPI speed and
+ * number of bytes remaining to be transferred. It assumes that if the
+ * calculated delay period is less than MIN_DELAY_US then it is probably
+ * not worth the overhead of yielding.
+ */
+#define MIN_DELAY_US 250
+static void spi_delay(struct tegra_spi_channel *spi,
+ unsigned int bytes_remaining)
+{
+ unsigned int ns_per_byte, delay_us;
+
+ ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
+ delay_us = (ns_per_byte * bytes_remaining) / 1000;
+
+ if (delay_us < MIN_DELAY_US)
+ return;
+
+ udelay(delay_us);
+}
+
+static void tegra_spi_wait(struct tegra_spi_channel *spi)
+{
+ unsigned int count, dma_blk;
+
+ dma_blk = 1 + (read32(&spi->regs->dma_blk) &
+ (SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));
+
+ while ((count = spi_byte_count(spi)) != dma_blk)
+ spi_delay(spi, dma_blk - count);
+}
+
+
+static int fifo_error(struct tegra_spi_channel *spi)
+{
+ return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
+}
+
+static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
+ unsigned int bytes, enum spi_direction dir)
+{
+ u8 *p = spi->out_buf;
+ unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
+ u32 flush_mask, enable_mask;
+
+ if (dir == SPI_SEND) {
+ flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
+ enable_mask = SPI_CMD1_TX_EN;
+ } else {
+ flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
+ enable_mask = SPI_CMD1_RX_EN;
+ }
+
+ setbits_le32(&spi->regs->fifo_status, flush_mask);
+ while (read32(&spi->regs->fifo_status) & flush_mask)
+ ;
+
+ setbits_le32(&spi->regs->command1, enable_mask);
+
+ /* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
+ * PIO transfers */
+ write32(todo - 1, &spi->regs->dma_blk);
+
+ if (dir == SPI_SEND) {
+ unsigned int to_fifo = bytes;
+ while (to_fifo) {
+ write32(*p, &spi->regs->tx_fifo);
+ p++;
+ to_fifo--;
+ }
+ }
+
+ return todo;
+}
+
+static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
+{
+ setbits_le32(&spi->regs->trans_status, SPI_STATUS_RDY);
+ setbits_le32(&spi->regs->command1, SPI_CMD1_GO);
+ /* Make sure the write to command1 completes. */
+ read32(&spi->regs->command1);
+}
+
+static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
+{
+ return (read32(&spi->regs->fifo_status) >>
+ SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
+ SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
+}
+
+static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
+{
+ u8 *p = spi->in_buf;
+ struct mono_time start;
+ struct rela_time rt;
+
+ clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);
+
+ /*
+ * Allow some time in case the Rx FIFO does not yet have
+ * all packets pushed into it. See chrome-os-partner:24215.
+ */
+ timer_monotonic_get(&start);
+ do {
+ if (rx_fifo_count(spi) == spi_byte_count(spi))
+ break;
+ rt = current_time_from(&start);
+ } while (rela_time_in_microseconds(&rt) < SPI_FIFO_XFER_TIMEOUT_US);
+
+ while (!(read32(&spi->regs->fifo_status) &
+ SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
+ *p = read8(&spi->regs->rx_fifo);
+ p++;
+ }
+
+ if (fifo_error(spi)) {
+ printk(BIOS_ERR, "%s: ERROR:\n", __func__);
+ dump_spi_regs(spi);
+ dump_fifo_status(spi);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void setup_dma_params(struct tegra_spi_channel *spi,
+ struct apb_dma_channel *dma)
+{
+ /* APB bus width = 8-bits, address wrap for each word */
+ clrbits_le32(&dma->regs->apb_seq,
+ APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
+ /* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
+ * no address wrapping */
+ clrsetbits_le32(&dma->regs->ahb_seq,
+ (AHB_BURST_MASK << AHB_BURST_SHIFT),
+ 4 << AHB_BURST_SHIFT);
+
+ /* Set ONCE mode to transfer one "block" at a time (64KB) and enable
+ * flow control. */
+ clrbits_le32(&dma->regs->csr,
+ APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
+ setbits_le32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
+ (spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
+}
+
+static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
+ unsigned int bytes, enum spi_direction dir)
+{
+ unsigned int todo, wcount;
+
+ /*
+ * For DMA we need to think of things in terms of word count.
+ * AHB width is fixed at 32-bits. To avoid overrunning
+ * the in/out buffers we must align down. (Note: lowest 2-bits
+ * in WCOUNT register are ignored, and WCOUNT seems to count
+ * words starting at n-1)
+ *
+ * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
+ * WCOUNT should be 4. The remaining 3 bytes must be transferred
+ * using PIO.
+ */
+ todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA - TEGRA_DMA_ALIGN_BYTES);
+ todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
+ wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
+
+ if (dir == SPI_SEND) {
+ spi->dma_out = dma_claim();
+ if (!spi->dma_out)
+ return -1;
+
+ /* ensure bytes to send will be visible to DMA controller */
+ dcache_clean_by_mva(spi->out_buf, bytes);
+
+ write32((uintptr_t)&spi->regs->tx_fifo, &spi->dma_out->regs->apb_ptr);
+ write32((uintptr_t)spi->out_buf, &spi->dma_out->regs->ahb_ptr);
+ setbits_le32(&spi->dma_out->regs->csr, APB_CSR_DIR);
+ setup_dma_params(spi, spi->dma_out);
+ write32(wcount, &spi->dma_out->regs->wcount);
+ } else {
+ spi->dma_in = dma_claim();
+ if (!spi->dma_in)
+ return -1;
+
+ /* avoid data collisions */
+ dcache_clean_invalidate_by_mva(spi->in_buf, bytes);
+
+ write32((uintptr_t)&spi->regs->rx_fifo, &spi->dma_in->regs->apb_ptr);
+ write32((uintptr_t)spi->in_buf, &spi->dma_in->regs->ahb_ptr);
+ clrbits_le32(&spi->dma_in->regs->csr, APB_CSR_DIR);
+ setup_dma_params(spi, spi->dma_in);
+ write32(wcount, &spi->dma_in->regs->wcount);
+ }
+
+ /* BLOCK_SIZE starts at n-1 */
+ write32(todo - 1, &spi->regs->dma_blk);
+ return todo;
+}
+
+static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
+{
+ /*
+ * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
+ * (set bit to clear) between each transaction. Otherwise the next
+ * transaction does not start.
+ */
+ setbits_le32(&spi->regs->trans_status, SPI_STATUS_RDY);
+
+ if (spi->dma_out)
+ setbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN);
+ if (spi->dma_in)
+ setbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN);
+
+ /*
+ * To avoid underrun conditions, enable APB DMA before SPI DMA for
+ * Tx and enable SPI DMA before APB DMA for Rx.
+ */
+ if (spi->dma_out)
+ dma_start(spi->dma_out);
+ setbits_le32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
+ if (spi->dma_in)
+ dma_start(spi->dma_in);
+
+
+}
+
+static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
+{
+ int ret;
+ unsigned int todo;
+
+ todo = read32(&spi->dma_in->regs->wcount);
+
+ if (spi->dma_in) {
+ while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
+ dma_busy(spi->dma_in))
+ ; /* this shouldn't take long, no udelay */
+ dma_stop(spi->dma_in);
+ clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN);
+ dma_release(spi->dma_in);
+ }
+
+ if (spi->dma_out) {
+ while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
+ dma_busy(spi->dma_out))
+ spi_delay(spi, todo - spi_byte_count(spi));
+ clrbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN);
+ dma_stop(spi->dma_out);
+ dma_release(spi->dma_out);
+ }
+
+ if (fifo_error(spi)) {
+ printk(BIOS_ERR, "%s: ERROR:\n", __func__);
+ dump_dma_regs(spi->dma_out);
+ dump_dma_regs(spi->dma_in);
+ dump_spi_regs(spi);
+ dump_fifo_status(spi);
+ ret = -1;
+ goto done;
+ }
+
+ ret = 0;
+done:
+ spi->dma_in = NULL;
+ spi->dma_out = NULL;
+ return ret;
+}
+
+/*
+ * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
+ * sets transfer mode used by this channel (if not set already).
+ *
+ * A few caveats to watch out for:
+ * - The number of bytes which can be transferred may be smaller than the
+ * number of bytes the caller specifies. The number of bytes ready for
+ * a transfer will be returned (unless an error occurs).
+ *
+ * - Only one mode can be used for both RX and TX. The transfer mode of the
+ * SPI channel (spi->xfer_mode) is checked each time this function is called.
+ * If conflicting modes are detected, spi->xfer_mode will be set to
+ * XFER_MODE_NONE and an error will be returned.
+ *
+ * Returns bytes ready for transfer if successful, <0 to indicate error.
+ */
+static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
+ unsigned int bytes, enum spi_direction dir)
+{
+ unsigned int line_size = dcache_line_bytes();
+ unsigned int align;
+ int ret = -1;
+
+ if (!bytes)
+ return 0;
+
+ if (dir == SPI_SEND)
+ spi->out_buf = buf;
+ else if (dir == SPI_RECEIVE)
+ spi->in_buf = buf;
+
+ /*
+ * Alignment considerations:
+ * When we enable caching we'll need to clean/invalidate portions of
+ * memory. So we need to be careful about memory alignment. Also, DMA
+ * likes to operate on 4-bytes at a time on the AHB side. So for
+ * example, if we only want to receive 1 byte, 4 bytes will be
+ * written in memory even if those extra 3 bytes are beyond the length
+ * we want.
+ *
+ * For now we'll use PIO to send/receive unaligned bytes. We may
+ * consider setting aside some space for a kind of bounce buffer to
+ * stay in DMA mode once we have a chance to benchmark the two
+ * approaches.
+ */
+
+ if (bytes < line_size) {
+ if (spi->xfer_mode == XFER_MODE_DMA) {
+ spi->xfer_mode = XFER_MODE_NONE;
+ ret = -1;
+ } else {
+ spi->xfer_mode = XFER_MODE_PIO;
+ ret = tegra_spi_pio_prepare(spi, bytes, dir);
+ }
+ goto done;
+ }
+
+ /* transfer bytes before the aligned boundary */
+ align = line_size - ((uintptr_t)buf % line_size);
+ if ((align != 0) && (align != line_size)) {
+ if (spi->xfer_mode == XFER_MODE_DMA) {
+ spi->xfer_mode = XFER_MODE_NONE;
+ ret = -1;
+ } else {
+ spi->xfer_mode = XFER_MODE_PIO;
+ ret = tegra_spi_pio_prepare(spi, align, dir);
+ }
+ goto done;
+ }
+
+ /* do aligned DMA transfer */
+ align = (((uintptr_t)buf + bytes) % line_size);
+ if (bytes - align > 0) {
+ unsigned int dma_bytes = bytes - align;
+
+ if (spi->xfer_mode == XFER_MODE_PIO) {
+ spi->xfer_mode = XFER_MODE_NONE;
+ ret = -1;
+ } else {
+ spi->xfer_mode = XFER_MODE_DMA;
+ ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
+ }
+
+ goto done;
+ }
+
+ /* transfer any remaining unaligned bytes */
+ if (align) {
+ if (spi->xfer_mode == XFER_MODE_DMA) {
+ spi->xfer_mode = XFER_MODE_NONE;
+ ret = -1;
+ } else {
+ spi->xfer_mode = XFER_MODE_PIO;
+ ret = tegra_spi_pio_prepare(spi, align, dir);
+ }
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
+static void xfer_start(struct tegra_spi_channel *spi)
+{
+ if (spi->xfer_mode == XFER_MODE_DMA)
+ tegra_spi_dma_start(spi);
+ else
+ tegra_spi_pio_start(spi);
+}
+
+static void xfer_wait(struct tegra_spi_channel *spi)
+{
+ tegra_spi_wait(spi);
+}
+
+static int xfer_finish(struct tegra_spi_channel *spi)
+{
+ int ret;
+
+ if (spi->xfer_mode == XFER_MODE_DMA)
+ ret = tegra_spi_dma_finish(spi);
+ else
+ ret = tegra_spi_pio_finish(spi);
+
+ spi->xfer_mode = XFER_MODE_NONE;
+ return ret;
+}
+
+int spi_xfer(struct spi_slave *slave, const void *dout,
+ unsigned int out_bytes, void *din, unsigned int in_bytes)
+{
+ struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
+ u8 *out_buf = (u8 *)dout;
+ u8 *in_buf = (u8 *)din;
+ unsigned int todo;
+ int ret = 0;
+
+ /* tegra bus numbers start at 1 */
+ ASSERT(slave->bus >= 1 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));
+
+ while (out_bytes || in_bytes) {
+ int x = 0;
+
+ if (out_bytes == 0)
+ todo = in_bytes;
+ else if (in_bytes == 0)
+ todo = out_bytes;
+ else
+ todo = MIN(out_bytes, in_bytes);
+
+ if (out_bytes) {
+ x = xfer_setup(spi, out_buf, todo, SPI_SEND);
+ if (x < 0) {
+ if (spi->xfer_mode == XFER_MODE_NONE) {
+ spi->xfer_mode = XFER_MODE_PIO;
+ continue;
+ } else {
+ ret = -1;
+ break;
+ }
+ }
+ }
+ if (in_bytes) {
+ x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
+ if (x < 0) {
+ if (spi->xfer_mode == XFER_MODE_NONE) {
+ spi->xfer_mode = XFER_MODE_PIO;
+ continue;
+ } else {
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Note: Some devices (such as Chrome EC) are sensitive to
+ * delays, so be careful when adding debug prints not to
+ * cause timeouts between transfers.
+ */
+ xfer_start(spi);
+ xfer_wait(spi);
+ if (xfer_finish(spi)) {
+ ret = -1;
+ break;
+ }
+
+ /* Post-processing. */
+ if (out_bytes) {
+ out_bytes -= x;
+ out_buf += x;
+ }
+ if (in_bytes) {
+ in_bytes -= x;
+ in_buf += x;
+ }
+ }
+
+ if (ret < 0) {
+ printk(BIOS_ERR, "%s: Error detected\n", __func__);
+ printk(BIOS_ERR, "Transaction size: %u, bytes remaining: "
+ "%u out / %u in\n", todo, out_bytes, in_bytes);
+ clear_fifo_status(spi);
+ }
+ return ret;
+}
+
+/* SPI as CBFS media. */
+struct tegra_spi_media {
+ struct spi_slave *slave;
+ struct cbfs_simple_buffer buffer;
+};
+
+static int tegra_spi_cbfs_open(struct cbfs_media *media)
+{
+ DEBUG_SPI("tegra_spi_cbfs_open\n");
+ return 0;
+}
+
+static int tegra_spi_cbfs_close(struct cbfs_media *media)
+{
+ DEBUG_SPI("tegra_spi_cbfs_close\n");
+ return 0;
+}
+
+#define JEDEC_READ 0x03
+#define JEDEC_READ_OUTSIZE 0x04
+#define JEDEC_FAST_READ_DUAL 0x3b
+#define JEDEC_FAST_READ_DUAL_OUTSIZE 0x05
+
+static size_t tegra_spi_cbfs_read(struct cbfs_media *media, void *dest,
+ size_t offset, size_t count)
+{
+ struct tegra_spi_media *spi = (struct tegra_spi_media *)media->context;
+ u8 spi_read_cmd[JEDEC_FAST_READ_DUAL_OUTSIZE];
+ unsigned int read_cmd_bytes;
+ int ret = count;
+ struct tegra_spi_channel *channel;
+
+ channel = to_tegra_spi(spi->slave->bus);
+
+ if (channel->dual_mode) {
+ /*
+ * Command 0x3b will interleave data only, command 0xbb will
+ * interleave the address as well. It's nice to see the address
+ * plainly when debugging, and we're mostly concerned with
+ * large transfers so the optimization of using 0xbb isn't
+ * really worthwhile.
+ */
+ spi_read_cmd[0] = JEDEC_FAST_READ_DUAL;
+ spi_read_cmd[4] = 0x00; /* dummy byte */
+ read_cmd_bytes = JEDEC_FAST_READ_DUAL_OUTSIZE;
+ } else {
+ spi_read_cmd[0] = JEDEC_READ;
+ read_cmd_bytes = JEDEC_READ_OUTSIZE;
+ }
+ spi_read_cmd[1] = (offset >> 16) & 0xff;
+ spi_read_cmd[2] = (offset >> 8) & 0xff;
+ spi_read_cmd[3] = offset & 0xff;
+
+ spi_claim_bus(spi->slave);
+
+ if (spi_xfer(spi->slave, spi_read_cmd,
+ read_cmd_bytes, NULL, 0) < 0) {
+ ret = -1;
+ printk(BIOS_ERR, "%s: Failed to transfer %zu bytes\n",
+ __func__, sizeof(spi_read_cmd));
+ goto tegra_spi_cbfs_read_exit;
+ }
+
+ if (channel->dual_mode) {
+ setbits_le32(&channel->regs->command1, SPI_CMD1_BOTH_EN_BIT);
+ }
+ if (spi_xfer(spi->slave, NULL, 0, dest, count)) {
+ ret = -1;
+ printk(BIOS_ERR, "%s: Failed to transfer %zu bytes\n",
+ __func__, count);
+ }
+ if (channel->dual_mode)
+ clrbits_le32(&channel->regs->command1, SPI_CMD1_BOTH_EN_BIT);
+
+tegra_spi_cbfs_read_exit:
+ /* de-assert /CS */
+ spi_release_bus(spi->slave);
+ return (ret < 0) ? 0 : ret;
+}
+
+static void *tegra_spi_cbfs_map(struct cbfs_media *media, size_t offset,
+ size_t count)
+{
+ struct tegra_spi_media *spi = (struct tegra_spi_media*)media->context;
+ void *map;
+ DEBUG_SPI("tegra_spi_cbfs_map\n");
+ map = cbfs_simple_buffer_map(&spi->buffer, media, offset, count);
+ return map;
+}
+
+static void *tegra_spi_cbfs_unmap(struct cbfs_media *media,
+ const void *address)
+{
+ struct tegra_spi_media *spi = (struct tegra_spi_media*)media->context;
+ DEBUG_SPI("tegra_spi_cbfs_unmap\n");
+ return cbfs_simple_buffer_unmap(&spi->buffer, address);
+}
+
+int initialize_tegra_spi_cbfs_media(struct cbfs_media *media,
+ void *buffer_address,
+ size_t buffer_size)
+{
+ // TODO Replace static variable to support multiple streams.
+ static struct tegra_spi_media context;
+ static struct tegra_spi_channel *channel;
+
+ channel = &tegra_spi_channels[CONFIG_BOOT_MEDIA_SPI_BUS - 1];
+ channel->slave.cs = CONFIG_BOOT_MEDIA_SPI_CHIP_SELECT;
+
+ DEBUG_SPI("Initializing CBFS media on SPI\n");
+
+ context.slave = &channel->slave;
+ context.buffer.allocated = context.buffer.last_allocate = 0;
+ context.buffer.buffer = buffer_address;
+ context.buffer.size = buffer_size;
+ media->context = (void*)&context;
+ media->open = tegra_spi_cbfs_open;
+ media->close = tegra_spi_cbfs_close;
+ media->read = tegra_spi_cbfs_read;
+ media->map = tegra_spi_cbfs_map;
+ media->unmap = tegra_spi_cbfs_unmap;
+
+#if CONFIG_SPI_FLASH_FAST_READ_DUAL_OUTPUT_3B == 1
+ channel->dual_mode = 1;
+#endif
+
+ return 0;
+}
+
+struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs)
+{
+ struct tegra_spi_channel *channel = to_tegra_spi(bus);
+ if (!channel)
+ return NULL;
+
+ return &channel->slave;
+}
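
Everything above is reachable through the generic spi-generic.h entry points implemented here (spi_setup_slave(), spi_claim_bus(), spi_xfer(), spi_release_bus()). A minimal caller that reads four bytes from offset 0 of a flash part, mirroring tegra_spi_cbfs_read(); the bus/chip-select values are arbitrary examples:

#include <spi-generic.h>
#include <stdint.h>

static int read_flash_start(uint8_t dest[4])
{
	/* JEDEC 0x03 read: command byte plus a 24-bit address of zero */
	const uint8_t cmd[4] = { 0x03, 0x00, 0x00, 0x00 };
	struct spi_slave *slave = spi_setup_slave(4, 0);
	int ret = 0;

	if (!slave)
		return -1;

	spi_claim_bus(slave);			/* drives /CS active */
	if (spi_xfer(slave, cmd, sizeof(cmd), NULL, 0) < 0 ||
	    spi_xfer(slave, NULL, 0, dest, 4) < 0)
		ret = -1;
	spi_release_bus(slave);			/* de-asserts /CS */

	return ret;
}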
diff --git a/src/soc/nvidia/tegra132/spi.h b/src/soc/nvidia/tegra132/spi.h
new file mode 100644
index 0000000000..e53505925b
--- /dev/null
+++ b/src/soc/nvidia/tegra132/spi.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2014 Google Inc.
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVIDIA_TEGRA132_SPI_H__
+#define __NVIDIA_TEGRA132_SPI_H__
+
+#include <spi-generic.h>
+#include <stddef.h>
+
+#include "dma.h"
+
+struct tegra_spi_regs {
+ u32 command1; /* 0x000: SPI_COMMAND1 */
+ u32 command2; /* 0x004: SPI_COMMAND2 */
+ u32 timing1; /* 0x008: SPI_CS_TIM1 */
+ u32 timing2; /* 0x00c: SPI_CS_TIM2 */
+ u32 trans_status; /* 0x010: SPI_TRANS_STATUS */
+ u32 fifo_status; /* 0x014: SPI_FIFO_STATUS */
+ u32 tx_data; /* 0x018: SPI_TX_DATA */
+ u32 rx_data; /* 0x01c: SPI_RX_DATA */
+ u32 dma_ctl; /* 0x020: SPI_DMA_CTL */
+ u32 dma_blk; /* 0x024: SPI_DMA_BLK */
+ u32 rsvd[56]; /* 0x028-0x107: reserved */
+ u32 tx_fifo; /* 0x108: SPI_FIFO1 */
+ u32 rsvd2[31]; /* 0x10c-0x187 reserved */
+ u32 rx_fifo; /* 0x188: SPI_FIFO2 */
+ u32 spare_ctl; /* 0x18c: SPI_SPARE_CTRL */
+} __attribute__((packed));
+check_member(tegra_spi_regs, spare_ctl, 0x18c);
+
+enum spi_xfer_mode {
+ XFER_MODE_NONE = 0,
+ XFER_MODE_PIO,
+ XFER_MODE_DMA,
+};
+
+struct tegra_spi_channel {
+ struct tegra_spi_regs *regs;
+
+ /* static configuration */
+ struct spi_slave slave;
+ unsigned int req_sel;
+
+ int dual_mode; /* for x2 transfers with bit interleaving */
+
+ /* context (used internally) */
+ u8 *in_buf, *out_buf;
+ struct apb_dma_channel *dma_out, *dma_in;
+ enum spi_xfer_mode xfer_mode;
+};
+
+struct cbfs_media;
+int initialize_tegra_spi_cbfs_media(struct cbfs_media *media,
+ void *buffer_address,
+ size_t buffer_size);
+
+struct tegra_spi_channel *tegra_spi_init(unsigned int bus);
+
+#endif /* __NVIDIA_TEGRA132_SPI_H__ */
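
To actually boot from SPI, a board ties the constructor declared above into its CBFS media hook. A hypothetical wiring, assuming the tree's usual init_default_cbfs_media() callback and a board-chosen scratch buffer (neither is part of this patch):

#include <cbfs.h>
#include <stdint.h>
#include "spi.h"

/* Scratch space handed to the simple-buffer CBFS media layer. */
static uint8_t cbfs_media_buffer[4096];

int init_default_cbfs_media(struct cbfs_media *media)
{
	return initialize_tegra_spi_cbfs_media(media, cbfs_media_buffer,
					       sizeof(cbfs_media_buffer));
}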