/*
* This file is part of the coreboot project.
*
* Copyright (C) 2013 ChromeOS Authors
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
#include <console/console.h>
#include <stdint.h>
#include <rmodule.h>
#include <arch/cpu.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <delay.h>
#include <device/device.h>
#include <device/path.h>
#include <lib.h>
#include <smp/atomic.h>
#include <smp/spinlock.h>
#include <thread.h>
#include "haswell.h"
/* This needs to match the layout in the .module_parameters section. */
struct sipi_params {
u16 gdtlimit;
u32 gdt;
u16 unused;
u32 idt_ptr;
u32 stack_top;
u32 stack_size;
u32 microcode_ptr;
u32 msr_table_ptr;
u32 msr_count;
u32 c_handler;
u32 c_handler_arg;
u8 apic_to_cpu_num[CONFIG_MAX_CPUS];
} __attribute__((packed));
/* This also needs to match the assembly code for saved MSR encoding. */
struct saved_msr {
u32 index;
u32 lo;
u32 hi;
} __attribute__((packed));
/* The sipi vector rmodule is included in the ramstage using 'objcopy -B'. */
extern char _binary_sipi_vector_start[];
/* These symbols are defined in c_start.S. */
extern char gdt[];
extern char gdt_end[];
extern char idtarg[];
/* Top of the ramstage stack region, used below to place the AP stacks. */
extern char _estack[];
/* This table keeps track of each CPU's APIC id. */
static u8 apic_id_table[CONFIG_MAX_CPUS];
static device_t cpu_devs[CONFIG_MAX_CPUS];
/* Number of APs that have checked in. */
static atomic_t num_aps;
/* Number of APs that have relocated their SMM handler. */
static atomic_t num_aps_relocated_smm;
/* Barrier to stop APs from performing SMM relocation. Cache-line aligned
 * so the flag does not share a line with unrelated data. */
static int smm_relocation_barrier_begin __attribute__ ((aligned (64)));
/* Determine if hyperthreading is disabled. */
int ht_disabled;
static inline void mfence(void)
{
__asm__ __volatile__("mfence" : : : "memory");
}
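/* The APs spin on the barrier flag until the BSP releases it. The PAUSE
 * instruction hints that this is a spin-wait loop, which reduces power and
 * yields execution resources to the sibling hyperthread. */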
static inline void wait_for_barrier(volatile int *barrier)
{
while (*barrier == 0) {
asm ("pause");
}
}
static inline void release_barrier(volatile int *barrier)
{
*barrier = 1;
}
static void ap_wait_for_smm_relocation_begin(void)
{
wait_for_barrier(&smm_relocation_barrier_begin);
}
/* This function pointer is used by the non-BSP CPUs to initiate relocation. It
* points to either a serial or parallel SMM initiation. */
static void (*ap_initiate_smm_relocation)(void) = &smm_initiate_relocation;
/* Returns 1 on timeout waiting for APs; 0 when the target number of APs
 * has checked in. */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
int delay_step)
{
int timeout = 0;
int delayed = 0;
while (atomic_read(val) != target) {
udelay(delay_step);
delayed += delay_step;
if (delayed >= total_delay) {
timeout = 1;
break;
}
}
return timeout;
}
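/* Called on the BSP to release the APs spinning in
 * ap_wait_for_smm_relocation_begin() so they can relocate their SMM
 * handlers, either serially or in parallel. */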
void release_aps_for_smm_relocation(int do_parallel)
{
/* Change the AP SMM initiation function, and ensure it is visible
* before releasing the APs. */
if (do_parallel) {
ap_initiate_smm_relocation = &smm_initiate_relocation_parallel;
mfence();
}
release_barrier(&smm_relocation_barrier_begin);
/* Wait up to 100 ms for the APs to relocate their SMM handlers. */
if (wait_for_aps(&num_aps_relocated_smm, atomic_read(&num_aps),
100000 /* 100 ms */, 200 /* us */))
printk(BIOS_DEBUG, "Timed out waiting for AP SMM relocation\n");
}
/* The MTRR code sets up ROM caching on the BSP, but not on the APs. However,
 * the boot loader payload disables this. In order for Linux not to complain,
 * ensure the caching is disabled for the APs before going to sleep. */
static void cleanup_rom_caching(void)
{
x86_mtrr_disable_rom_caching();
}
/* By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded. */
static void asmlinkage ap_init(unsigned int cpu, void *microcode_ptr)
{
struct cpu_info *info;
/* Signal that the AP has arrived. */
atomic_inc(&num_aps);
/* Ensure the local apic is enabled */
enable_lapic();
info = cpu_info();
info->index = cpu;
info->cpu = cpu_devs[cpu];
thread_init_cpu_info_non_bsp(info);
apic_id_table[info->index] = lapicid();
info->cpu->path.apic.apic_id = apic_id_table[info->index];
/* Call through the cpu driver's initialization. */
cpu_initialize(info->index);
ap_wait_for_smm_relocation_begin();
ap_initiate_smm_relocation();
/* Indicate that SMM relocation has occurred on this thread. */
atomic_inc(&num_aps_relocated_smm);
/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_ptr);
/* The MTRR resources are core scoped. Therefore, there is no need
* to do the same work twice. Additionally, this check keeps the
* ROM cache enabled on the BSP since its hyperthread sibling won't
* call cleanup_rom_caching(). */
if ((lapicid() & 1) == 0)
cleanup_rom_caching();
/* FIXME(adurbin): park CPUs properly -- preferably somewhere in a
* reserved part of memory that the OS cannot get to. */
stop_this_cpu();
}
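/* Fill in the SIPI parameter block consumed by the assembly stub: the
 * ramstage GDT/IDT from c_start.S, the AP stack region, and a default
 * APIC-id-to-CPU-number map. */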
static void setup_default_sipi_vector_params(struct sipi_params *sp)
{
int i;
u8 apic_id;
u8 apic_id_inc;
sp->gdt = (u32)&gdt;
sp->gdtlimit = (u32)&gdt_end - (u32)&gdt - 1;
sp->idt_ptr = (u32)&idtarg;
sp->stack_size = CONFIG_STACK_SIZE;
sp->stack_top = (u32)&_estack;
/* Adjust the stack top to take into account cpu_info. */
sp->stack_top -= sizeof(struct cpu_info);
/* Default to linear APIC id space if HT is enabled. If it is
 * disabled the APIC ids increase by 2 as the odd numbered APIC
 * ids are not present. */
apic_id_inc = (ht_disabled) ? 2 : 1;
for (i = 0, apic_id = 0; i < CONFIG_MAX_CPUS; i++) {
sp->apic_to_cpu_num[i] = apic_id;
apic_id += apic_id_inc;
}
}
#define NUM_FIXED_MTRRS 11
static unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
MTRRfix64K_00000_MSR, MTRRfix16K_80000_MSR, MTRRfix16K_A0000_MSR,
MTRRfix4K_C0000_MSR, MTRRfix4K_C8000_MSR, MTRRfix4K_D0000_MSR,
MTRRfix4K_D8000_MSR, MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR,
MTRRfix4K_F0000_MSR, MTRRfix4K_F8000_MSR,
};
static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
msr_t msr;
msr = rdmsr(index);
entry->index = index;
entry->lo = msr.lo;
entry->hi = msr.hi;
/* Return the next entry. */
entry++;
return entry;
}
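/* Mirror the BSP's MTRR state (fixed-range, variable-range, and default
 * type MSRs) into the buffer at start so the SIPI stub can program an
 * identical memory view on each AP. Returns the number of MSRs saved,
 * or -1 if the buffer is too small. */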
static int save_bsp_msrs(char *start, int size)
{
int msr_count;
int num_var_mtrrs;
struct saved_msr *msr_entry;
int i;
msr_t msr;
/* Determine the number of MTRRs that need to be saved. */
msr = rdmsr(MTRRcap_MSR);
num_var_mtrrs = msr.lo & 0xff;
/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE. */
msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;
if ((msr_count * sizeof(struct saved_msr)) > size) {
printk(BIOS_CRIT, "Cannot mirror all %d msrs.\n", msr_count);
return -1;
}
msr_entry = (void *)start;
for (i = 0; i < NUM_FIXED_MTRRS; i++) {
msr_entry = save_msr(fixed_mtrrs[i], msr_entry);
}
for (i = 0; i < num_var_mtrrs; i++) {
msr_entry = save_msr(MTRRphysBase_MSR(i), msr_entry);
msr_entry = save_msr(MTRRphysMask_MSR(i), msr_entry);
}
msr_entry = save_msr(MTRRdefType_MSR, msr_entry);
return msr_count;
}
/* The SIPI vector is loaded at SMM_DEFAULT_BASE. The reason is that the
 * memory range is already reserved so the OS cannot use it. That leaves the
 * region free for AP bringup before SMM is initialized. */
static u32 sipi_vector_location = SMM_DEFAULT_BASE;
static int sipi_vector_location_size = SMM_DEFAULT_SIZE;
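/* Memory layout at the SIPI vector location once loading completes:
 *
 *   +--------------------+ <- sipi_vector_location
 *   | SIPI rmodule       |
 *   +--------------------+ <- sipi_vector_location + module_size
 *   | mirrored BSP MSRs  |
 *   +--------------------+
 */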
static int load_sipi_vector(const void *microcode_patch)
{
struct rmodule sipi_mod;
int module_size;
int num_msrs;
struct sipi_params *sp;
char *mod_loc = (void *)sipi_vector_location;
const int loc_size = sipi_vector_location_size;
if (rmodule_parse(&_binary_sipi_vector_start, &sipi_mod)) {
printk(BIOS_CRIT, "Unable to parse sipi module.\n");
return -1;
}
if (rmodule_entry_offset(&sipi_mod) != 0) {
printk(BIOS_CRIT, "SIPI module entry offset is not 0!\n");
return -1;
}
if (rmodule_load_alignment(&sipi_mod) != 4096) {
printk(BIOS_CRIT, "SIPI module load alignment(%d) != 4096.\n",
rmodule_load_alignment(&sipi_mod));
return -1;
}
module_size = rmodule_memory_size(&sipi_mod);
/* Align to 4 bytes. */
module_size += 3;
module_size &= ~3;
if (module_size > loc_size) {
printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
module_size, loc_size);
return -1;
}
num_msrs = save_bsp_msrs(&mod_loc[module_size], loc_size - module_size);
if (num_msrs < 0) {
printk(BIOS_CRIT, "Error mirroring BSP's msrs.\n");
return -1;
}
if (rmodule_load(mod_loc, &sipi_mod)) {
printk(BIOS_CRIT, "Unable to load SIPI module.\n");
return -1;
}
sp = rmodule_parameters(&sipi_mod);
if (sp == NULL) {
printk(BIOS_CRIT, "SIPI module has no parameters.\n");
return -1;
}
setup_default_sipi_vector_params(sp);
/* Setup MSR table. */
sp->msr_table_ptr = (u32)&mod_loc[module_size];
sp->msr_count = num_msrs;
/* Provide pointer to microcode patch. */
sp->microcode_ptr = (u32)microcode_patch;
/* The microcode pointer is passed on through to the C handler so
 * that it can be loaded again after SMM relocation. */
sp->c_handler_arg = (u32)microcode_patch;
sp->c_handler = (u32)&ap_init;
/* Make sure SIPI vector hits RAM so the APs that come up will see
* the startup code even if the caches are disabled. */
wbinvd();
return 0;
}
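/* Record the BSP and allocate a device structure for every other hardware
 * thread reported by CORE_THREAD_COUNT_MSR, storing the total thread count
 * in *total_hw_threads. Returns the number of CPUs that have device
 * structures, capped at CONFIG_MAX_CPUS. */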
static int allocate_cpu_devices(struct bus *cpu_bus, int *total_hw_threads)
{
int i;
int num_threads;
int num_cores;
int max_cpus;
struct cpu_info *info;
msr_t msr;
info = cpu_info();
cpu_devs[info->index] = info->cpu;
apic_id_table[info->index] = info->cpu->path.apic.apic_id;
msr = rdmsr(CORE_THREAD_COUNT_MSR);
num_threads = (msr.lo >> 0) & 0xffff;
num_cores = (msr.lo >> 16) & 0xffff;
printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
num_cores, num_threads);
max_cpus = num_threads;
*total_hw_threads = num_threads;
if (num_threads > CONFIG_MAX_CPUS) {
printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
num_threads, CONFIG_MAX_CPUS);
max_cpus = CONFIG_MAX_CPUS;
}
/* Determine if hyperthreading is enabled. If not, the APIC id space
* is sparse with ids incrementing by 2 instead of 1. */
ht_disabled = num_threads == num_cores;
for (i = 1; i < max_cpus; i++) {
struct device_path cpu_path;
device_t new;
/* Build the cpu device path */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;
if (ht_disabled)
cpu_path.apic.apic_id = cpu_path.apic.apic_id * 2;
/* Allocate the new cpu device structure */
new = alloc_find_dev(cpu_bus, &cpu_path);
if (new == NULL) {
printk(BIOS_CRIT, "Could not allocte cpu device\n");
max_cpus--;
}
cpu_devs[i] = new;
}
return max_cpus;
}
int setup_ap_init(struct bus *cpu_bus, int *max_cpus,
const void *microcode_patch)
{
int num_cpus;
int hw_threads;
/* Allocate cpu devices for all threads; the count includes the currently
 * running CPU (the BSP). */
num_cpus = allocate_cpu_devices(cpu_bus, &hw_threads);
/* Load the SIPI vector. */
if (load_sipi_vector(microcode_patch))
return -1;
*max_cpus = num_cpus;
if (num_cpus < hw_threads) {
printk(BIOS_CRIT,
"ERROR: More HW threads (%d) than support (%d).\n",
hw_threads, num_cpus);
return -1;
}
return 0;
}
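/*
 * A sketch of the expected bring-up sequence from platform code. The caller
 * shown is hypothetical; only the ordering is implied by this file:
 *
 *	int max_cpus;
 *
 *	if (setup_ap_init(cpu_bus, &max_cpus, microcode) == 0) {
 *		start_aps(cpu_bus, max_cpus - 1);   <- BSP is not an AP
 *		smm_initiate_relocation();          <- BSP relocates its own handler
 *		release_aps_for_smm_relocation(0);
 *	}
 */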
/* Returns 1 for timeout. 0 on success. */
static int apic_wait_timeout(int total_delay, int delay_step)
{
int total = 0;
int timeout = 0;
while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
udelay(delay_step);
total += delay_step;
if (total >= total_delay) {
timeout = 1;
break;
}
}
return timeout;
}
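/* Start the APs with the classic INIT-SIPI-SIPI sequence: assert INIT to
 * all-but-self, wait 10 ms, then send two STARTUP IPIs carrying the page
 * number of the SIPI vector. */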
int start_aps(struct bus *cpu_bus, int ap_count)
{
int sipi_vector;
if (ap_count == 0)
return 0;
/* The vector is the 4 KiB page number of the 4 KiB-aligned SIPI
 * location, and it must fit in a single byte. */
sipi_vector = sipi_vector_location >> 12;
if (sipi_vector >= 256) {
printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
sipi_vector);
return -1;
}
printk(BIOS_DEBUG, "Attempting to start %d APs\n", ap_count);
if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
printk(BIOS_DEBUG, "timed out. Aborting.\n");
return -1;
} else
printk(BIOS_DEBUG, "done.\n");
}
/* Send INIT IPI to all but self. */
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
LAPIC_DM_INIT);
printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
mdelay(10);
/* Send 1st SIPI */
if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
printk(BIOS_DEBUG, "timed out. Aborting.\n");
return -1;
} else
printk(BIOS_DEBUG, "done.\n");
}
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
LAPIC_DM_STARTUP | sipi_vector);
printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
printk(BIOS_DEBUG, "timed out.\n");
return -1;
} else {
printk(BIOS_DEBUG, "done.\n");
}
/* Wait up to 200 us for the APs to check in. */
wait_for_aps(&num_aps, ap_count, 200 /* us */, 15 /* us */);
/* Send 2nd SIPI */
if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
printk(BIOS_DEBUG, "timed out. Aborting.\n");
return -1;
} else
printk(BIOS_DEBUG, "done.\n");
}
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
LAPIC_DM_STARTUP | sipi_vector);
printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
printk(BIOS_DEBUG, "timed out.\n");
return -1;
} else {
printk(BIOS_DEBUG, "done.\n");
}
/* Wait for CPUs to check in. */
if (wait_for_aps(&num_aps, ap_count, 10000 /* 10 ms */, 50 /* us */)) {
printk(BIOS_DEBUG, "Not all APs checked in: %d/%d.\n",
atomic_read(&num_aps), ap_count);
return -1;
}
return 0;
}
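/* Ask the current CPU to relocate its SMM handler by sending itself an
 * SMI. No lock is taken, so multiple CPUs may relocate concurrently. */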
void smm_initiate_relocation_parallel(void)
{
if ((lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY)) {
printk(BIOS_DEBUG, "Waiting for ICR not to be busy...");
if (apic_wait_timeout(1000 /* 1 ms */, 50)) {
printk(BIOS_DEBUG, "timed out. Aborting.\n");
return;
} else
printk(BIOS_DEBUG, "done.\n");
}
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(lapicid()));
lapic_write_around(LAPIC_ICR, LAPIC_INT_ASSERT | LAPIC_DM_SMI);
if (apic_wait_timeout(1000 /* 1 ms */, 100 /* us */)) {
printk(BIOS_DEBUG, "SMI Relocation timed out.\n");
} else
printk(BIOS_DEBUG, "Relocation complete.\n");
}
DECLARE_SPIN_LOCK(smm_relocation_lock);
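/* Serialized variant: the spin lock ensures only one CPU performs SMM
 * relocation at a time. */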
void smm_initiate_relocation(void)
{
spin_lock(&smm_relocation_lock);
smm_initiate_relocation_parallel();
spin_unlock(&smm_relocation_lock);
}