author     Furquan Shaikh <furquan@google.com>      2015-07-13 10:14:16 -0700
committer  Patrick Georgi <pgeorgi@google.com>      2015-07-16 22:37:11 +0200
commit     7731cddaa22746f6884300177bd8c03aeeffadc2 (patch)
tree       f83218f23fc8de764192071ac5896d4254c1b6cd /src/soc
parent     fe48f0941e309751a210cacf6435632d86009ab4 (diff)
t210: SPI driver cleanup
1. Get rid of spi_delay(); instead, use a tight loop to poll the SPI status.
2. The first indication that an SPI operation is complete (i.e. that the
   FIFOs have been processed) is the SPI_STATUS_RDY bit, so tegra_spi_wait()
   should check this bit before reading BLOCK_COUNT or any other FIFO count
   field.
3. Flush both the TX and RX FIFOs for SEND and RECV operations, in both PIO
   and DMA modes.
4. There is no need to check rx_fifo_count == spi_byte_count to decide that
   the PIO operation has finished; the RDY bit is sufficient to ensure the
   SPI operation is complete. An assert is added to ensure we never hit the
   case where RDY is set yet rx_fifo_count != spi_byte_count for PIO.

BUG=chrome-os-partner:41877
BRANCH=None
TEST=Compiles successfully and reboot test runs successfully for 10K+
iterations.

Change-Id: I1adb9672c1503b562309a8bc6c22fe7d2271768e
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: de1515605e17e0c6b81874f9f3c49fd0c1b92756
Original-Change-Id: I5853d0df1bfd6020a17e478040bc4c1834563fe4
Original-Signed-off-by: Furquan Shaikh <furquan@google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/285141
Original-Reviewed-by: Jimmy Zhang <jimmzhang@nvidia.com>
Original-Tested-by: Jimmy Zhang <jimmzhang@nvidia.com>
Original-Reviewed-by: Furquan Shaikh <furquan@chromium.org>
Original-Tested-by: Furquan Shaikh <furquan@chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan@chromium.org>
Original-Trybot-Ready: Furquan Shaikh <furquan@chromium.org>
Reviewed-on: http://review.coreboot.org/10947
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
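Taken together, points 1, 2 and 4 reduce the completion wait to a tight poll on SPI_STATUS_RDY followed by a sanity assert. The sketch below simply restates the resulting tegra_spi_wait() from the diff further down; the register layout, read32(), ASSERT() and spi_byte_count() helpers are assumed from the existing driver and are not redefined here. flush_fifos() (point 3) is the companion helper that clears both FIFOs before each PIO or DMA transfer; see the diff.

/* Sketch (mirrors the change in the diff below): wait for RDY, then
 * assert that the processed block count matches what was programmed. */
static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
	/* Number of blocks programmed into the DMA_BLK register. */
	uint32_t dma_blk_count = 1 + (read32(&spi->regs->dma_blk) &
			(SPI_DMA_CTL_BLOCK_SIZE_MASK <<
			 SPI_DMA_CTL_BLOCK_SIZE_SHIFT));

	/* Tight poll: RDY indicates the FIFOs have been processed. */
	while ((read32(&spi->regs->trans_status) & SPI_STATUS_RDY) !=
			SPI_STATUS_RDY)
		;

	/* With RDY set, the block count must already match dma_blk. */
	ASSERT(spi_byte_count(spi) == dma_blk_count);
}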
Diffstat (limited to 'src/soc')
-rw-r--r--    src/soc/nvidia/tegra210/spi.c    99
1 file changed, 41 insertions(+), 58 deletions(-)
diff --git a/src/soc/nvidia/tegra210/spi.c b/src/soc/nvidia/tegra210/spi.c
index ffb4d47c70..5946ef078b 100644
--- a/src/soc/nvidia/tegra210/spi.c
+++ b/src/soc/nvidia/tegra210/spi.c
@@ -212,12 +212,6 @@ static struct tegra_spi_channel * const to_tegra_spi(int bus) {
return &tegra_spi_channels[bus - 1];
}
-static unsigned int tegra_spi_speed(unsigned int bus)
-{
- /* FIXME: implement this properly, for now use max value (50MHz) */
- return 50000000;
-}
-
int spi_claim_bus(struct spi_slave *slave)
{
struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
@@ -329,36 +323,22 @@ static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
(SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
}
-/*
- * This calls udelay() with a calculated value based on the SPI speed and
- * number of bytes remaining to be transferred. It assumes that if the
- * calculated delay period is less than MIN_DELAY_US then it is probably
- * not worth the overhead of yielding.
- */
-#define MIN_DELAY_US 250
-static void spi_delay(struct tegra_spi_channel *spi,
- unsigned int bytes_remaining)
-{
- unsigned int ns_per_byte, delay_us;
-
- ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
- delay_us = (ns_per_byte * bytes_remaining) / 1000;
-
- if (delay_us < MIN_DELAY_US)
- return;
-
- udelay(delay_us);
-}
-
static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
- unsigned int count, dma_blk;
+ uint32_t dma_blk_count = 1 + (read32(&spi->regs->dma_blk) &
+ (SPI_DMA_CTL_BLOCK_SIZE_MASK <<
+ SPI_DMA_CTL_BLOCK_SIZE_SHIFT));
- dma_blk = 1 + (read32(&spi->regs->dma_blk) &
- (SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));
+ while ((read32(&spi->regs->trans_status) & SPI_STATUS_RDY) !=
+ SPI_STATUS_RDY)
+ ;
- while ((count = spi_byte_count(spi)) != dma_blk)
- spi_delay(spi, dma_blk - count);
+ /*
+ * If RDY bit is set, we should never encounter the condition that
+ * blocks processed is not equal to the number programmed in dma_blk
+ * register.
+ */
+ ASSERT(spi_byte_count(spi) == dma_blk_count);
}
@@ -367,24 +347,32 @@ static int fifo_error(struct tegra_spi_channel *spi)
return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
}
+static void flush_fifos(struct tegra_spi_channel *spi)
+{
+ const uint32_t flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH |
+ SPI_FIFO_STATUS_RX_FIFO_FLUSH;
+
+ uint32_t fifo_status = read32(&spi->regs->fifo_status);
+ fifo_status |= flush_mask;
+ write32(&spi->regs->fifo_status, flush_mask);
+
+ while (read32(&spi->regs->fifo_status) & flush_mask)
+ ;
+}
+
static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
unsigned int bytes, enum spi_direction dir)
{
u8 *p = spi->out_buf;
unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
- u32 flush_mask, enable_mask;
+ u32 enable_mask;
- if (dir == SPI_SEND) {
- flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
+ flush_fifos(spi);
+
+ if (dir == SPI_SEND)
enable_mask = SPI_CMD1_TX_EN;
- } else {
- flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
+ else
enable_mask = SPI_CMD1_RX_EN;
- }
-
- setbits_le32(&spi->regs->fifo_status, flush_mask);
- while (read32(&spi->regs->fifo_status) & flush_mask)
- ;
/*
* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
@@ -439,24 +427,17 @@ static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
{
u8 *p = spi->in_buf;
- struct stopwatch sw;
clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);
- /*
- * Allow some time in case the Rx FIFO does not yet have
- * all packets pushed into it. See chrome-os-partner:24215.
- */
- stopwatch_init_usecs_expire(&sw, SPI_FIFO_XFER_TIMEOUT_US);
- do {
- if (rx_fifo_count(spi) == spi_byte_count(spi))
- break;
- } while (!stopwatch_expired(&sw));
+ ASSERT(rx_fifo_count(spi) == spi_byte_count(spi));
- while (!(read32(&spi->regs->fifo_status) &
- SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
- *p = read8(&spi->regs->rx_fifo);
- p++;
+ if (p) {
+ while (!(read32(&spi->regs->fifo_status) &
+ SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
+ *p = read8(&spi->regs->rx_fifo);
+ p++;
+ }
}
if (fifo_error(spi)) {
@@ -509,6 +490,8 @@ static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
+ flush_fifos(spi);
+
if (dir == SPI_SEND) {
spi->dma_out = dma_claim();
if (!spi->dma_out)
@@ -605,7 +588,7 @@ static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
if (spi->dma_in) {
while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
dma_busy(spi->dma_in))
- ; /* this shouldn't take long, no udelay */
+ ;
dma_stop(spi->dma_in);
clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN);
/* Disable secure access for the channel. */
@@ -617,7 +600,7 @@ static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
if (spi->dma_out) {
while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
dma_busy(spi->dma_out))
- spi_delay(spi, todo - spi_byte_count(spi));
+ ;
clrbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN);
dma_stop(spi->dma_out);
/* Disable secure access for the channel. */
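For context, here is a minimal caller sketch of the PIO path this commit cleans up, assuming the coreboot SPI API of the time (spi_setup_slave(), spi_claim_bus(), spi_xfer(), spi_release_bus()). Only spi_claim_bus() appears in this diff, so the other signatures, headers and the RDID command used here are illustrative, not taken from the commit.

#include <spi-generic.h>
#include <stdint.h>

/* Illustrative only: read a 3-byte JEDEC ID over a Tegra SPI bus.
 * Each transfer ends in tegra_spi_wait(), which after this commit
 * spins on SPI_STATUS_RDY instead of calling the removed spi_delay(). */
static int read_jedec_id(unsigned int bus, unsigned int cs, uint8_t id[3])
{
	const uint8_t cmd = 0x9f;	/* JEDEC RDID opcode */
	struct spi_slave *slave = spi_setup_slave(bus, cs);

	if (!slave || spi_claim_bus(slave))
		return -1;

	if (spi_xfer(slave, &cmd, sizeof(cmd), id, 3)) {
		spi_release_bus(slave);
		return -1;
	}

	spi_release_bus(slave);
	return 0;
}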