aboutsummaryrefslogtreecommitdiff
path: root/src/arch/arm/armv7
diff options
context:
space:
mode:
Diffstat (limited to 'src/arch/arm/armv7')
-rw-r--r--src/arch/arm/armv7/bootblock.S2
-rw-r--r--src/arch/arm/armv7/bootblock_simple.c17
-rw-r--r--src/arch/arm/armv7/cache.c79
-rw-r--r--src/arch/arm/armv7/cpu.S31
-rw-r--r--src/arch/arm/armv7/mmu.c13
5 files changed, 54 insertions, 88 deletions
diff --git a/src/arch/arm/armv7/bootblock.S b/src/arch/arm/armv7/bootblock.S
index 4258caf439..9ed5d543aa 100644
--- a/src/arch/arm/armv7/bootblock.S
+++ b/src/arch/arm/armv7/bootblock.S
@@ -46,6 +46,8 @@ ENDPROC(_start)
.thumb
ENTRY(_thumb_start)
+ bl arm_init_caches
+
/*
* From Cortex-A Series Programmer's Guide:
* Only CPU 0 performs initialization. Other CPUs go into WFI
diff --git a/src/arch/arm/armv7/bootblock_simple.c b/src/arch/arm/armv7/bootblock_simple.c
index 5cd59704d2..248ea4e2b5 100644
--- a/src/arch/arm/armv7/bootblock_simple.c
+++ b/src/arch/arm/armv7/bootblock_simple.c
@@ -32,23 +32,6 @@ void main(void)
{
const char *stage_name = "fallback/romstage";
void *entry;
- uint32_t sctlr;
-
- /* Globally disable MMU, caches, and branch prediction (these should
- * be disabled by default on reset) */
- sctlr = read_sctlr();
- sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_Z | SCTLR_I);
- write_sctlr(sctlr);
-
- arm_invalidate_caches();
-
- /*
- * Re-enable icache and branch prediction. MMU and dcache will be
- * set up later.
- */
- sctlr = read_sctlr();
- sctlr |= SCTLR_Z | SCTLR_I;
- write_sctlr(sctlr);
bootblock_cpu_init();
bootblock_mainboard_init();
diff --git a/src/arch/arm/armv7/cache.c b/src/arch/arm/armv7/cache.c
index 7db86c81f1..31819f7f48 100644
--- a/src/arch/arm/armv7/cache.c
+++ b/src/arch/arm/armv7/cache.c
@@ -37,26 +37,9 @@
void tlb_invalidate_all(void)
{
- /*
- * FIXME: ARMv7 Architecture Ref. Manual claims that the distinction
- * instruction vs. data TLBs is deprecated in ARMv7, however this does
- * not seem to be the case as of Cortex-A15.
- */
+ /* TLBIALL includes dTLB and iTLB on systems that have them. */
tlbiall();
- dtlbiall();
- itlbiall();
- isb();
dsb();
-}
-
-void icache_invalidate_all(void)
-{
- /*
- * icache can be entirely invalidated with one operation.
- * Note: If branch predictors are architecturally-visible, ICIALLU
- * also performs a BPIALL operation (B2-1283 in arch manual)
- */
- iciallu();
isb();
}
@@ -133,6 +116,11 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
dcache_op_mva(addr, len, OP_DCIMVAC);
}
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
void dcache_mmu_disable(void)
{
uint32_t sctlr;
@@ -143,64 +131,19 @@ void dcache_mmu_disable(void)
write_sctlr(sctlr);
}
-
void dcache_mmu_enable(void)
{
uint32_t sctlr;
sctlr = read_sctlr();
- dcache_clean_invalidate_all();
sctlr |= SCTLR_C | SCTLR_M;
write_sctlr(sctlr);
}
-void arm_invalidate_caches(void)
+void cache_sync_instructions(void)
{
- uint32_t clidr;
- int level;
-
- /* Invalidate branch predictor */
- bpiall();
-
- /* Iterate thru each cache identified in CLIDR and invalidate */
- clidr = read_clidr();
- for (level = 0; level < 7; level++) {
- unsigned int ctype = (clidr >> (level * 3)) & 0x7;
- uint32_t csselr;
-
- switch(ctype) {
- case 0x0:
- /* no cache */
- break;
- case 0x1:
- /* icache only */
- csselr = (level << 1) | 1;
- write_csselr(csselr);
- icache_invalidate_all();
- break;
- case 0x2:
- case 0x4:
- /* dcache only or unified cache */
- csselr = level << 1;
- write_csselr(csselr);
- dcache_invalidate_all();
- break;
- case 0x3:
- /* separate icache and dcache */
- csselr = (level << 1) | 1;
- write_csselr(csselr);
- icache_invalidate_all();
-
- csselr = level << 1;
- write_csselr(csselr);
- dcache_invalidate_all();
- break;
- default:
- /* reserved */
- break;
- }
- }
-
- /* Invalidate TLB */
- tlb_invalidate_all();
+ dcache_clean_all(); /* includes trailing DSB (in assembly) */
+ iciallu(); /* includes BPIALL (architecturally) */
+ dsb();
+ isb();
}
diff --git a/src/arch/arm/armv7/cpu.S b/src/arch/arm/armv7/cpu.S
index 29a19e76df..5738116c21 100644
--- a/src/arch/arm/armv7/cpu.S
+++ b/src/arch/arm/armv7/cpu.S
@@ -104,6 +104,37 @@
bx lr
.endm
+/*
+ * Bring an ARM processor we just gained control of (e.g. from IROM) into a
+ * known state regarding caches/SCTLR. Completely cleans and invalidates
+ * icache/dcache, disables MMU and dcache (if active), and enables unaligned
+ * accesses, icache and branch prediction (if inactive). Clobbers r4 and r5.
+ */
+ENTRY(arm_init_caches)
+ /* r4: SCTLR, return address: r5 (stay valid for the whole function) */
+ mov r5, lr
+ mrc p15, 0, r4, c1, c0, 0
+
+ /* Activate ICache (12) and Branch Prediction (11) already for speed */
+ orr r4, # (1 << 11) | (1 << 12)
+ mcr p15, 0, r4, c1, c0, 0
+
+ /* Flush and invalidate dcache in ascending order */
+ bl dcache_clean_invalidate_all
+
+ /* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
+ and r4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
+ mcr p15, 0, r4, c1, c0, 0
+
+ /* Invalidate icache and TLB for good measure */
+ mcr p15, 0, r0, c7, c5, 0
+ mcr p15, 0, r0, c8, c7, 0
+ dsb
+ isb
+
+ bx r5
+ENDPROC(arm_init_caches)
+
ENTRY(dcache_invalidate_all)
dcache_apply_all crm=c6
ENDPROC(dcache_invalidate_all)
diff --git a/src/arch/arm/armv7/mmu.c b/src/arch/arm/armv7/mmu.c
index cc915a6a33..d71003057a 100644
--- a/src/arch/arm/armv7/mmu.c
+++ b/src/arch/arm/armv7/mmu.c
@@ -106,11 +106,18 @@ void mmu_config_range(unsigned long start_mb, unsigned long size_mb,
for (i = start_mb; i < start_mb + size_mb; i++)
writel((i << 20) | attr, &ttb_entry[i]);
- /* Flush the page table entries, and old translations from the TLB. */
- for (i = start_mb; i < start_mb + size_mb; i++) {
+ /* Flush the page table entries from the dcache. */
+ for (i = start_mb; i < start_mb + size_mb; i++)
dccmvac((uintptr_t)&ttb_entry[i]);
+
+ dsb();
+
+ /* Invalidate the TLB entries. */
+ for (i = start_mb; i < start_mb + size_mb; i++)
tlbimvaa(i*MiB);
- }
+
+ dsb();
+ isb();
}
void mmu_init(void)