summaryrefslogtreecommitdiff
path: root/src/cpu/intel/common/common_init.c
blob: 6203922fd8296f81f88424c2ec8393929fcb96fe (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <arch/cpu.h>
#include <console/console.h>
#include <cpu/intel/msr.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include "common.h"

#define  CPUID_6_ECX_EPB	(1 << 3)

/*
 * Program the VMX enable bits and then lock IA32_FEATURE_CONTROL.
 * Order matters: set_feature_ctrl_lock() sets the lock bit, after which
 * the MSR is read-only (set_feature_ctrl_vmx() bails out when it finds
 * the lock already set), so the VMX bits must be written first.
 */
void set_vmx_and_lock(void)
{
	set_feature_ctrl_vmx();
	set_feature_ctrl_lock();
}

/*
 * Enable or disable VMX in IA32_FEATURE_CONTROL according to the
 * ENABLE_VMX Kconfig option. Does nothing if the CPU advertises
 * neither VMX nor SMX, or if the MSR is already locked.
 */
void set_feature_ctrl_vmx(void)
{
	const int enable = CONFIG(ENABLE_VMX);
	const uint32_t feature_flag = cpu_get_feature_flags_ecx();
	msr_t msr;

	/* Don't touch the MSR unless CPUID reports VMX or SMX support. */
	if (!(feature_flag & (CPUID_VMX | CPUID_SMX))) {
		printk(BIOS_DEBUG, "CPU doesn't support VMX; exiting\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	/* Once locked, a second write to this MSR raises an illegal
	 * instruction fault — just report the current state and leave. */
	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked; ");
		printk(BIOS_DEBUG, "VMX status: %s\n",
			msr.lo & (1 << 2) ? "enabled" : "disabled");
		return;
	}

	/* The MSR may initialize with random values; clear it
	 * unconditionally so a "disable" config really disables. */
	msr.lo = 0;
	msr.hi = 0;

	if (enable) {
		/* Bit 2: VMX outside SMX operation. */
		msr.lo |= (1 << 2);
		if (feature_flag & CPUID_SMX) {
			/* Bit 1: VMX inside SMX operation. */
			msr.lo |= (1 << 1);
			/* Bits 15:8: enable GetSec and all GetSec leaves. */
			if (CONFIG(INTEL_TXT))
				msr.lo |= (0xff << 8);
		}
	}

	wrmsr(IA32_FEATURE_CONTROL, msr);

	printk(BIOS_DEBUG, "VMX status: %s\n",
		enable ? "enabled" : "disabled");
}
/*
 * Set the IA32_FEATURE_CONTROL lock bit when SET_IA32_FC_LOCK_BIT is
 * configured. Skipped when the CPU advertises neither VMX nor SMX, or
 * when the MSR is already locked.
 */
void set_feature_ctrl_lock(void)
{
	const int lock = CONFIG(SET_IA32_FC_LOCK_BIT);
	const uint32_t feature_flag = cpu_get_feature_flags_ecx();
	msr_t msr;

	/* Don't touch the MSR unless CPUID reports VMX or SMX support. */
	if (!(feature_flag & (CPUID_VMX | CPUID_SMX))) {
		printk(BIOS_DEBUG, "Read IA32_FEATURE_CONTROL unsupported\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	/* A write to an already-locked IA32_FEATURE_CONTROL raises an
	 * illegal instruction fault, so never write it twice. */
	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
		return;
	}

	if (lock) {
		msr.lo |= (1 << 0);	/* Bit 0 is the lock bit. */
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}

	printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL status: %s\n",
		lock ? "locked" : "unlocked");
}

/*
 * Init cppc_config in a way that's appropriate for Intel
 * processors with Intel Enhanced Speed Step Technology.
 * NOTE: version 2 is expected to be the typical use case.
 * For now this function 'punts' on version 3 and just
 * populates the additional fields with 'unsupported'.
 *
 * @param config  _CPC register table to populate (caller-owned).
 * @param version _CPC revision to emit (1, 2 or 3).
 */
void cpu_init_cppc_config(struct cppc_config *config, u32 version)
{
	config->version = version;

	/* Version 1 entries: each _CPC field maps to an HWP MSR
	 * bit-field via ACPI_REG_MSR(msr, shift, width). */
	config->regs[CPPC_HIGHEST_PERF]			= ACPI_REG_MSR(IA32_HWP_CAPABILITIES, 0, 8);
	config->regs[CPPC_NOMINAL_PERF]			= ACPI_REG_MSR(MSR_PLATFORM_INFO, 8, 8);
	config->regs[CPPC_LOWEST_NONL_PERF]		= ACPI_REG_MSR(IA32_HWP_CAPABILITIES, 16, 8);
	config->regs[CPPC_LOWEST_PERF]			= ACPI_REG_MSR(IA32_HWP_CAPABILITIES, 24, 8);
	config->regs[CPPC_GUARANTEED_PERF]		= ACPI_REG_MSR(IA32_HWP_CAPABILITIES, 8, 8);
	config->regs[CPPC_DESIRED_PERF]			= ACPI_REG_MSR(IA32_HWP_REQUEST, 16, 8);
	config->regs[CPPC_MIN_PERF]			= ACPI_REG_MSR(IA32_HWP_REQUEST, 0, 8);
	config->regs[CPPC_MAX_PERF]			= ACPI_REG_MSR(IA32_HWP_REQUEST, 8, 8);
	config->regs[CPPC_PERF_REDUCE_TOLERANCE]	= ACPI_REG_UNSUPPORTED;
	config->regs[CPPC_TIME_WINDOW]			= ACPI_REG_UNSUPPORTED;
	config->regs[CPPC_COUNTER_WRAP]			= ACPI_REG_UNSUPPORTED;
	config->regs[CPPC_REF_PERF_COUNTER]		= ACPI_REG_MSR(IA32_MPERF, 0, 64);
	config->regs[CPPC_DELIVERED_PERF_COUNTER]	= ACPI_REG_MSR(IA32_APERF, 0, 64);
	config->regs[CPPC_PERF_LIMITED]			= ACPI_REG_MSR(IA32_HWP_STATUS, 2, 1);
	config->regs[CPPC_ENABLE]			= ACPI_REG_MSR(IA32_PM_ENABLE, 0, 1);

	if (version < 2)
		return;

	/* NOTE(review): looks like this memory-space entry with addrl = 1
	 * encodes the constant value 1 (autonomous selection enabled)
	 * rather than a real MMIO register — confirm against the _CPC
	 * generator in acpigen. */
	config->regs[CPPC_AUTO_SELECT] = (acpi_addr_t){
		.space_id    = ACPI_ADDRESS_SPACE_MEMORY,
		.bit_width   = 32,
		.bit_offset  = 0,
		.access_size = ACPI_ACCESS_SIZE_UNDEFINED,
		.addrl       = 1,
	};

	/* Version 2 additions, again backed by IA32_HWP_REQUEST fields. */
	config->regs[CPPC_AUTO_ACTIVITY_WINDOW]		= ACPI_REG_MSR(IA32_HWP_REQUEST, 32, 10);
	config->regs[CPPC_PERF_PREF]			= ACPI_REG_MSR(IA32_HWP_REQUEST, 24, 8);
	config->regs[CPPC_REF_PERF]			= ACPI_REG_UNSUPPORTED;

	if (version < 3)
		return;

	/* Version 3 additions: not implemented, reported as unsupported. */
	config->regs[CPPC_LOWEST_FREQ]			= ACPI_REG_UNSUPPORTED;
	config->regs[CPPC_NOMINAL_FREQ]			= ACPI_REG_UNSUPPORTED;
}

/*
 * Lock the AES-NI feature configuration when SET_MSR_AESNI_LOCK_BIT is
 * configured. Skipped on CPUs without AES-NI and on HT siblings.
 */
void set_aesni_lock(void)
{
	msr_t feature_config;

	if (!CONFIG(SET_MSR_AESNI_LOCK_BIT))
		return;

	/* Nothing to lock without AES-NI support. */
	if (!(cpu_get_feature_flags_ecx() & CPUID_AES))
		return;

	/* Only run once per core as specified in the MSR datasheet */
	if (intel_ht_sibling())
		return;

	/* Set the lock bit only if it hasn't stuck already. */
	feature_config = rdmsr(MSR_FEATURE_CONFIG);
	if (!(feature_config.lo & AESNI_LOCK))
		msr_set(MSR_FEATURE_CONFIG, AESNI_LOCK);
}

/* Enable TPR update messages by clearing the TPR_UPDATES_DISABLE bit in
 * MSR_PIC_MSG_CONTROL. */
void enable_lapic_tpr(void)
{
	msr_unset(MSR_PIC_MSG_CONTROL, TPR_UPDATES_DISABLE);
}

/* Set the DCA type-0 enable bit, but only on CPUs whose CPUID feature
 * flags advertise Direct Cache Access. */
void configure_dca_cap(void)
{
	if (!(cpu_get_feature_flags_ecx() & CPUID_DCA))
		return;

	msr_set(IA32_PLATFORM_DCA_CAP, DCA_TYPE0_EN);
}

/*
 * Write the low bits of @policy into IA32_ENERGY_PERF_BIAS. No-op when
 * CPUID leaf 6 ECX bit 3 does not advertise the EPB MSR.
 */
void set_energy_perf_bias(u8 policy)
{
	if (!(cpuid_ecx(6) & CPUID_6_ECX_EPB))
		return;

	const u8 epb = policy & ENERGY_POLICY_MASK;

	/* Replace only the policy field, preserving the rest of the MSR. */
	msr_unset_and_set(IA32_ENERGY_PERF_BIAS, ENERGY_POLICY_MASK, epb);
	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", epb);
}