/* SPDX-License-Identifier: GPL-2.0-only */
/* This file is part of the coreboot project. */
#include <arch/cpu.h>
#include <device/mmio.h>
#include <soc/cpu.h>
#include <soc/power.h>
#include <string.h>
#include <types.h>
/* ACTLR, L2CTLR and L2ACTLR constants used in SMP core power-up. */
#define ACTLR_SMP (1 << 6)
#define L2CTLR_ECC_PARITY (1 << 21)
#define L2CTLR_DATA_RAM_LATENCY_MASK (7 << 0)
#define L2CTLR_TAG_RAM_LATENCY_MASK (7 << 6)
#define L2CTLR_DATA_RAM_LATENCY_CYCLES_3 (2 << 0)
#define L2CTLR_TAG_RAM_LATENCY_CYCLES_3 (2 << 6)
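/* The latency fields encode (cycles - 1), so a field value of 2 means 3 cycles. */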
#define L2ACTLR_DISABLE_CLEAN_EVICT_PUSH_EXTERNAL (1 << 3)
#define L2ACTLR_ENABLE_HAZARD_DETECT_TIMEOUT (1 << 7)
#define L2ACTLR_FORCE_L2_LOGIC_CLOCK_ENABLE_ACTIVE (1 << 27)
/* Part number field of the CPU ID register (MIDR). */
#define PART_NUMBER_CORTEX_A15 (0xc0f)
/* State of CPU cores in Exynos 5420. */
#define CORE_STATE_RESET (1 << 0)
#define CORE_STATE_SECONDARY_RESET (1 << 1)
#define CORE_STATE_SWITCH_CLUSTER (1 << 4)
/* The default address at which to re-power on a core. */
#define CORE_RESET_INIT_ADDRESS ((void *)0x00000000)
/* Vectors in BL1 (0x02020000 = base of iRAM). */
#define VECTOR_CORE_SEV_HANDLER ((void *)(intptr_t)0x02020004)
#define VECTOR_LOW_POWER_FLAG ((void *)(intptr_t)0x02020028)
#define VECTOR_LOW_POWER_ADDRESS ((void *)(intptr_t)0x0202002C)
/* The data structure for the "CPU state" memory page (shared with the kernel)
 * controlling cores in the active cluster. The kernel writes the starting
 * address for a core into "hotplug_address" before powering it on. Note that
 * the address is hard-coded in the kernel
 * (EXYNOS5420_PA_SYSRAM_NS = 0x02073000). */
volatile struct exynos5420_cpu_states
{
uint32_t _reserved[2]; /* RESV, +0x00 */
uint32_t resume_address; /* REG0, +0x08 */
uint32_t resume_flag; /* REG1, +0x0C */
uint32_t _reg2; /* REG2, +0x10 */
uint32_t _reg3; /* REG3, +0x14 */
uint32_t switch_address; /* REG4, +0x18, cluster switching */
uint32_t hotplug_address; /* REG5, +0x1C, core hotplug */
uint32_t _reg6; /* REG6, +0x20 */
uint32_t c2_address; /* REG7, +0x24, C2 state change */
/* Managed per core status for active cluster, offset: +0x28~0x38 */
uint32_t cpu_states[4];
/* Managed per core GIC status for active cluster, offset: 0x38~0x48 */
uint32_t cpu_gic_states[4];
} *exynos_cpu_states = (volatile struct exynos5420_cpu_states *)0x02073000;
/* When leaving the core handlers and jumping to the hot-plug address (or doing
 * cluster switching), we do not know whether the destination is Thumb or ARM
 * code, so a BX instruction is required.
 */
inline static void jump_bx(void *address)
{
asm volatile ("bx %0" : : "r"(address));
/* never returns. */
}
/* Extracts a field of 'len' bits starting at bit 'start' from a 32-bit value. */
inline static uint32_t get_bits(uint32_t value, uint32_t start, uint32_t len)
{
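	/* Shift the field up to the top of the word to drop the higher bits,
	 * then back down so it ends up right-aligned at bit 0. */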
return ((value << (sizeof(value) * 8 - len - start)) >>
(sizeof(value) * 8 - len));
}
/* Waits for the referenced address to become ready (non-zero) and then jumps to it. */
static void wait_and_jump(volatile uint32_t *reference)
{
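	/* Sleep between polls; the core that stores the address issues SEV,
	 * which wakes us from WFE to re-check it. */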
while (!*reference) {
wfe();
}
jump_bx((void *)*reference);
}
/* Configures L2 Control Register to use 3 cycles for DATA/TAG RAM latency. */
static void configure_l2ctlr(void)
{
uint32_t val;
val = read_l2ctlr();
val &= ~(L2CTLR_DATA_RAM_LATENCY_MASK | L2CTLR_TAG_RAM_LATENCY_MASK);
val |= (L2CTLR_DATA_RAM_LATENCY_CYCLES_3 | L2CTLR_TAG_RAM_LATENCY_CYCLES_3 |
L2CTLR_ECC_PARITY);
write_l2ctlr(val);
}
/* Configures L2 Auxiliary Control Register for Cortex A15. */
static void configure_l2actlr(void)
{
uint32_t val;
val = read_l2actlr();
val |= (L2ACTLR_DISABLE_CLEAN_EVICT_PUSH_EXTERNAL |
L2ACTLR_ENABLE_HAZARD_DETECT_TIMEOUT |
L2ACTLR_FORCE_L2_LOGIC_CLOCK_ENABLE_ACTIVE);
write_l2actlr(val);
}
/* Initializes the CPU states to reset state. */
static void init_exynos_cpu_states(void)
{
memset((void *)exynos_cpu_states, 0, sizeof(*exynos_cpu_states));
exynos_cpu_states->cpu_states[0] = CORE_STATE_RESET;
exynos_cpu_states->cpu_states[1] = CORE_STATE_SECONDARY_RESET;
exynos_cpu_states->cpu_states[2] = CORE_STATE_SECONDARY_RESET;
exynos_cpu_states->cpu_states[3] = CORE_STATE_SECONDARY_RESET;
}
/*
* Ensures that the L2 logic has been used within the previous 256 cycles
 * before modifying the ACTLR.SMP bit. This is required during boot, before
 * the MMU has been enabled, or during a specified reset or power-down
 * sequence.
*/
static void enable_smp(void)
{
uint32_t actlr, val;
/* Enable SMP mode */
actlr = read_actlr();
actlr |= ACTLR_SMP;
	/* Dummy read to ensure an L2 access */
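	/* The value is then forced to zero and merged into actlr so the read
	 * is consumed without changing the ACTLR contents. */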
val = read32(&exynos_power->inform0);
val &= 0;
actlr |= val;
write_actlr(actlr);
dsb();
isb();
}
/* Starts the core and jumps to the correct location based on its state. */
static void core_start_execution(void)
{
u32 cpu_id, cpu_state;
enable_smp();
set_system_mode();
cpu_id = read_mpidr() & 0x3; /* up to 4 processors for one cluster. */
cpu_state = exynos_cpu_states->cpu_states[cpu_id];
if (cpu_state & CORE_STATE_SWITCH_CLUSTER) {
wait_and_jump(&exynos_cpu_states->switch_address);
/* never returns. */
}
/* Standard Exynos suspend/resume. */
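	/* inform1 acts as the resume flag and inform0 holds the resume
	 * address. */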
if (exynos_power->inform1) {
exynos_power->inform1 = 0;
jump_bx((void *)exynos_power->inform0);
/* never returns. */
}
if (cpu_state & CORE_STATE_RESET) {
		/* On reset, U-Boot jumps to its starting address; in coreboot
		 * it seems OK to ignore this for now. */
}
wait_and_jump(&exynos_cpu_states->hotplug_address);
/* never returns. */
}
/* The entry point for hotplug-in and cluster switching. */
static void low_power_start(void)
{
uint32_t sctlr, reg_val;
/* On warm reset, because iRAM is not cleared, all cores will enter
* low_power_start, not the initial address. So we need to check reset
* status again, and jump to 0x0 in that case. */
reg_val = read32(&exynos_power->spare0);
if (reg_val != RST_FLAG_VAL) {
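		/* spare0 no longer holds RST_FLAG_VAL: clear the low-power
		 * flag and restart from the default reset address. */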
write32(VECTOR_LOW_POWER_FLAG, 0x0);
jump_bx(CORE_RESET_INIT_ADDRESS);
/* restart CPU execution and never returns. */
}
	/* Workaround for iROM EVT1: an A7 core's execution may flow down an
	 * incorrect path, bypassing the first jump address and making the
	 * final jump address 0x0, so we have any core re-install the
	 * low_power_start address whenever it has been lost. */
reg_val = read32(VECTOR_CORE_SEV_HANDLER);
if (reg_val != (intptr_t)low_power_start) {
write32(VECTOR_CORE_SEV_HANDLER, (intptr_t)low_power_start);
dsb();
/* ask all cores to power on again. */
sev();
}
set_system_mode();
	/* Whenever a Cortex-A15 core powers on, the iROM resets its L2 cache,
	 * so we need to configure it again. */
if (get_bits(read_midr(), 4, 12) == PART_NUMBER_CORTEX_A15) {
configure_l2ctlr();
configure_l2actlr();
}
	/* Invalidate the TLB and the instruction cache */
tlbiall();
iciallu();
	/* Clear the high-vectors bit, disable the MMU and D-cache; enable the
	 * I-cache, branch prediction and alignment checks. */
sctlr = read_sctlr();
sctlr &= ~(SCTLR_V | SCTLR_M | SCTLR_C);
sctlr |= (SCTLR_I | SCTLR_Z | SCTLR_A);
write_sctlr(sctlr);
core_start_execution();
	/* core_start_execution() should never return; to guard against
	 * unexpected errors, a WFI puts the CPU back into an idle state. */
wfi();
}
/* Callback to shut down a core; safe to be set as the hot-plug address. */
static void power_down_core(void)
{
uint32_t mpidr, core_id;
	/* MPIDR: bits 0-1 = core ID within the cluster, bits 8-11 = cluster
	 * ID. On Exynos 5420 the cluster is only 0 or 1. */
mpidr = read_mpidr();
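	/* Flatten the in-cluster core ID and the cluster number into a single
	 * index into the arm_core[] configuration registers. */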
core_id = get_bits(mpidr, 0, 2) | (get_bits(mpidr, 8, 4) << 2);
	/* Set the core's power configuration to off.
	 * S5E5420A User Manual, 8.8.1.202, ARM_CORE0_CONFIGURATION: two bits
	 * control the power state at each power-down level.
	 */
write32(&exynos_power->arm_core[core_id].config, 0x0);
	/* S5E5420A User Manual, 8.4.2.5: after ARM_CORE*_CONFIGURATION has
	 * been set to zero, the PMU waits for the core to execute WFI and
	 * then runs the power-down sequence. */
wfi();
}
/* Configures the CPU states shared memory page, then shuts down all secondary cores. */
static void configure_secondary_cores(void)
{
if (get_bits(read_midr(), 4, 12) == PART_NUMBER_CORTEX_A15) {
configure_l2ctlr();
configure_l2actlr();
}
	/* Currently we use power_down_core as the callback for each core to
	 * shut itself down, but it would also be fine for CPU0 to set
	 * ARM_CORE*_CONFIGURATION to zero directly, because every secondary
	 * core should already be in WFI state (from the bootblock).
	 * power_down_core will be more useful once we want to use SMP inside
	 * firmware. */
/* Clear boot reg (hotplug address) in CPU states */
write32((void *)&exynos_cpu_states->hotplug_address, 0);
/* set low_power flag and address */
write32(VECTOR_LOW_POWER_ADDRESS, (intptr_t)low_power_start);
write32(VECTOR_LOW_POWER_FLAG, RST_FLAG_VAL);
write32(&exynos_power->spare0, RST_FLAG_VAL);
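	/* low_power_start() reads spare0 and falls back to the reset address
	 * when it does not hold RST_FLAG_VAL. */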
/* On next SEV, shutdown all cores. */
write32(VECTOR_CORE_SEV_HANDLER, (intptr_t)power_down_core);
/* Ask all cores in WFE mode to shutdown. */
dsb();
sev();
}
/* Configures the SMP cores on the Exynos 5420 SoC (and shuts down all
 * secondary cores). */
void exynos5420_config_smp(void)
{
init_exynos_cpu_states();
configure_secondary_cores();
}