/*
* Based on arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King.
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <arch/asm.h>
/*
* flush_dcache_all()
*
* Flush the whole D-cache.
*
* Corrupted registers: x0-x7, x9-x11
* From: Linux arch/arm64/mm/cache.S
*/
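/*
 * Overview (the standard ARMv8 set/way sequence): walk the cache levels
 * reported in CLIDR_EL1 up to the Level of Coherency; for every level that
 * holds a data or unified cache, select it through CSSELR_EL1, read its
 * geometry (line size, ways, sets) from CCSIDR_EL1, and issue one
 * "dc cisw" per set/way combination.
 */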
ENTRY(flush_dcache_all)
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
lsr x3, x3, #23 // shift LoC down to bits [3:1] (LoC * 2, same scale as the level counter x10)
cbz x3, finished // if loc is 0, then no need to clean
mov x10, #0 // start clean at cache level 0
loop1:
add x2, x10, x10, lsr #1 // work out 3x current cache level
lsr x1, x0, x2 // extract cache type bits from clidr
and x1, x1, #7 // mask off the bits for the current cache only
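/*
 * CLIDR Ctype values: 0 = no cache, 1 = instruction only, 2 = data only,
 * 3 = separate instruction and data, 4 = unified. Only levels with a data
 * or unified cache (Ctype >= 2) need set/way maintenance.
 */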
cmp x1, #2 // see what cache we have at this level
b.lt skip // skip if no cache, or just i-cache
mrs x9, daif // save DAIF state
msr daifset, #2 // mask IRQs so the CSSELR/CCSIDR access pair is atomic
msr csselr_el1, x10 // select current cache level in csselr
isb // isb to sync the new csselr & ccsidr
mrs x1, ccsidr_el1 // read the new ccsidr
msr daif, x9 // restore the saved interrupt state
and x2, x1, #7 // extract the length of the cache lines
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // extract the maximum way number (associativity - 1)
clz w5, w4 // find bit position of way size increment (32-bit clz: way lands in bits [31:32-A])
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract the maximum set (index) number
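/*
 * Register use in the set/way loops below (CCSIDR_EL1 without FEAT_CCIDX:
 * LineSize in [2:0], Associativity-1 in [12:3], NumSets-1 in [27:13]):
 *   x2 = log2(line size in bytes), the shift for the set field
 *   x4 = number of ways - 1
 *   x5 = clz32(ways - 1), the left shift that puts the way number in the top bits
 *   x7 = number of sets - 1
 * The "dc cisw" operand is: way in [31:32-A], set in [B-1:L], level in [3:1].
 */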
loop2:
mov x9, x4 // create working copy of max way size
loop3:
lsl x6, x9, x5
orr x11, x10, x6 // factor way and cache number into x11
lsl x6, x7, x2
orr x11, x11, x6 // factor index number into x11
dc cisw, x11 // clean & invalidate by set/way
subs x9, x9, #1 // decrement the way
b.ge loop3
subs x7, x7, #1 // decrement the index
b.ge loop2
skip:
add x10, x10, #2 // increment cache number
cmp x3, x10
b.gt loop1
finished:
mov x10, #0 // switch back to cache level 0
msr csselr_el1, x10 // select current cache level in csselr
dsb sy
isb
ret
ENDPROC(flush_dcache_all)
/*
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
* known state regarding caches/SCTLR. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
* accesses, icache and branch prediction (if inactive). Clobbers x0-x11
* (x4 and x8 locally, the rest through flush_dcache_all).
*/
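/*
 * SCTLR_EL3 bits touched below: M (0) = MMU enable, A (1) = alignment check,
 * C (2) = data cache enable, I (12) = instruction cache enable. x30 is saved
 * in x8 because the "bl flush_dcache_all" below overwrites it.
 */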
ENTRY(arm_init_caches)
/* x4: SCTLR value, x8: return address (both stay valid for the whole function) */
mov x8, x30
/* XXX: Assume that we always start running at EL3 */
mrs x4, sctlr_el3
/* FIXME: How to enable branch prediction on ARMv8? */
/* Flush and invalidate dcache */
bl flush_dcache_all
/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
and x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
/* Activate ICache (12) already for speed */
orr x4, x4, #(1 << 12)
msr sctlr_el3, x4
/* Invalidate icache and TLB for good measure */
ic iallu
tlbi alle3
dsb sy
isb
ret x8
ENDPROC(arm_init_caches)
/* Based on u-boot transition.S */
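/*
 * Drop from EL3 to EL2: set up SCR_EL3 for a non-secure, AArch64 EL2 with
 * HVC enabled (and SMC disabled), disable coprocessor traps, copy SP, VBAR
 * and SCTLR from EL3 to EL2, then eret to the caller's return address (x30)
 * in EL2h mode with D/A/I/F masked. Clobbers x0.
 */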
ENTRY(switch_el3_to_el2)
mov x0, #0x5b1 /* Non-secure EL0/EL1 | HVC | 64bit EL2 */
msr scr_el3, x0
msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
mov x0, #0x33ff
msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
/* Return to the EL2_SP2 mode from EL3 */
mov x0, sp
msr sp_el2, x0 /* Migrate SP */
mrs x0, vbar_el3
msr vbar_el2, x0 /* Migrate VBAR */
mrs x0, sctlr_el3
msr sctlr_el2, x0 /* Migrate SCTLR */
mov x0, #0x3c9
msr spsr_el3, x0 /* EL2_SP2 | D | A | I | F */
msr elr_el3, x30
eret
ENDPROC(switch_el3_to_el2)