/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2015 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define __SIMPLE_DEVICE__

#include <assert.h>
#include <stdint.h>
#include <arch/io.h>
#include <cpu/x86/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/amd/amdfam15.h>
#include <cbmem.h>
#include <console/console.h>
#include <stage_cache.h>
#include <soc/northbridge.h>
#include <soc/southbridge.h>
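
/*
 * Stash the top of low cacheable DRAM in BIOSRAM so it can be restored
 * later without being recomputed.
 */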
void backup_top_of_low_cacheable(uintptr_t ramtop)
{
	biosram_write32(BIOSRAM_CBMEM_TOP, ramtop);
}

uintptr_t restore_top_of_low_cacheable(void)
{
	return biosram_read32(BIOSRAM_CBMEM_TOP);
}
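
/*
 * CBMEM sits directly below TSEG: take the saved top of low cacheable
 * memory, back off by the TSEG size and round down to an 8MiB boundary.
 */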
void *cbmem_top(void)
{
	msr_t tom = rdmsr(TOP_MEM);

	if (!tom.lo)
		return 0;
	else
		/* 8MB alignment to keep MTRR usage low */
		return (void *)ALIGN_DOWN(restore_top_of_low_cacheable()
				- CONFIG_SMM_TSEG_SIZE, 8*MiB);
}
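
/* TSEG begins where CBMEM ends and spans CONFIG_SMM_TSEG_SIZE bytes. */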
static uintptr_t smm_region_start(void)
{
	return (uintptr_t)cbmem_top();
}

static size_t smm_region_size(void)
{
	return CONFIG_SMM_TSEG_SIZE;
}
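
/*
 * The external stage cache lives in TSEG's cache subregion; report an
 * empty region if that subregion cannot be located.
 */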
void stage_cache_external_region(void **base, size_t *size)
{
	if (smm_subregion(SMM_SUBREGION_CACHE, base, size)) {
		printk(BIOS_ERR, "ERROR: No cache SMM subregion.\n");
		*base = NULL;
		*size = 0;
	}
}

void smm_region_info(void **start, size_t *size)
{
	*start = (void *)smm_region_start();
	*size = smm_region_size();
}

/*
 * For data stored in TSEG, ensure TValid is clear so R/W access can reach
 * the DRAM when not in SMM.
 */
static void clear_tvalid(void)
{
	msr_t hwcr = rdmsr(HWCR_MSR);
	msr_t mask = rdmsr(MSR_SMM_MASK);
	int tvalid = !!(mask.lo & SMM_TSEG_VALID);

	if (hwcr.lo & SMM_LOCK) {
		if (!tvalid) /* not valid but locked means still accessible */
			return;

		printk(BIOS_ERR, "Error: can't clear TValid, already locked\n");
		return;
	}

	mask.lo &= ~SMM_TSEG_VALID;
	wrmsr(MSR_SMM_MASK, mask);
}
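
/*
 * TSEG layout: the SMM handler occupies the bottom of the region, while
 * the last CONFIG_SMM_RESERVED_SIZE bytes are set aside for the external
 * stage cache.
 */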
int smm_subregion(int sub, void **start, size_t *size)
{
	uintptr_t sub_base;
	size_t sub_size;
	const size_t cache_size = CONFIG_SMM_RESERVED_SIZE;

	sub_base = smm_region_start();
	sub_size = smm_region_size();

	assert(sub_size > CONFIG_SMM_RESERVED_SIZE);

	switch (sub) {
	case SMM_SUBREGION_HANDLER:
		/* Handler starts at the base of TSEG. */
		sub_size -= cache_size;
		break;
	case SMM_SUBREGION_CACHE:
		/* External cache sits at the top of TSEG. */
		sub_base += sub_size - cache_size;
		sub_size = cache_size;
		clear_tvalid();
		break;
	default:
		return -1;
	}

	*start = (void *)sub_base;
	*size = sub_size;

	return 0;
}