summaryrefslogtreecommitdiff
path: root/src/soc/intel/skylake
diff options
context:
space:
mode:
authorRobbie Zhang <robbie.zhang@intel.com>2017-02-21 14:00:31 -0800
committerMartin Roth <martinroth@google.com>2017-03-23 19:57:17 +0100
commit7de031759b916bbb91e74e6eea371b5ca87e6bd5 (patch)
tree3a21679ac510c4fb68f88ea8dae4cd1537231ff6 /src/soc/intel/skylake
parent08d808ff3d2f4ed373fd7390cbf16a63bbe6a7d5 (diff)
soc/intel/skylake: Add SGX initialization
This patch implements SGX initialization steps in coreboot per Intel SGX BWG rev 2.0.8 for Kaby Lake SoC. If enabled on a Kabylake device, SoC capability and PRM (processor reserved memory) of desired size (needs to be configured through PrmrrSize) are provisioned for later software stack to use SGX (i.e., run SGX enclaves). One issue is still puzzling and needs to be addressed: by calling configure_sgx() in cpu_core_init() which is the per-thread function, SGX is always failing for thread 0 but is successful for other 3 threads. I had to call configure_sgx() again from soc_init_cpus() which is the BSP-only function to make it enable on the BSP. Another pending work is the implementation for the Owner Epoch update which shall be added later. BUG=chrome-os-partner:62438 BRANCH=NONE TEST=Tested on Eve, verified SGX activation is successful on all threads. Change-Id: I8b64284875eae061fa8e7a01204d48d320a285a9 Signed-off-by: Robbie Zhang <robbie.zhang@intel.com> Reviewed-on: https://review.coreboot.org/18445 Tested-by: build bot (Jenkins) Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Diffstat (limited to 'src/soc/intel/skylake')
-rw-r--r--src/soc/intel/skylake/Makefile.inc1
-rw-r--r--src/soc/intel/skylake/chip.h3
-rw-r--r--src/soc/intel/skylake/cpu.c52
-rw-r--r--src/soc/intel/skylake/include/soc/cpu.h1
-rw-r--r--src/soc/intel/skylake/include/soc/msr.h10
-rw-r--r--src/soc/intel/skylake/sgx.c161
6 files changed, 211 insertions, 17 deletions
diff --git a/src/soc/intel/skylake/Makefile.inc b/src/soc/intel/skylake/Makefile.inc
index 2a6b15244b..1d5d89f3e5 100644
--- a/src/soc/intel/skylake/Makefile.inc
+++ b/src/soc/intel/skylake/Makefile.inc
@@ -80,6 +80,7 @@ ramstage-y += pmutil.c
ramstage-$(CONFIG_PLATFORM_USES_FSP2_0) += reset.c
ramstage-y += sata.c
ramstage-y += sd.c
+ramstage-y += sgx.c
ramstage-y += smbus.c
ramstage-y += smbus_common.c
ramstage-y += smi.c
diff --git a/src/soc/intel/skylake/chip.h b/src/soc/intel/skylake/chip.h
index fbd10a918c..85857d8200 100644
--- a/src/soc/intel/skylake/chip.h
+++ b/src/soc/intel/skylake/chip.h
@@ -459,6 +459,9 @@ struct soc_intel_skylake_config {
u8 SlowSlewRateForIa;
u8 SlowSlewRateForGt;
u8 SlowSlewRateForSa;
+
+ /* Enable SGX feature */
+ u8 sgx_enable;
};
typedef struct soc_intel_skylake_config config_t;
diff --git a/src/soc/intel/skylake/cpu.c b/src/soc/intel/skylake/cpu.c
index 0e5d0931cc..600a15f3d9 100644
--- a/src/soc/intel/skylake/cpu.c
+++ b/src/soc/intel/skylake/cpu.c
@@ -42,6 +42,10 @@
#include <soc/smm.h>
#include <soc/systemagent.h>
+/* MP initialization support. */
+static const void *microcode_patch;
+static int ht_disabled;
+
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
[0] = 0x00,
@@ -336,11 +340,10 @@ static void set_energy_perf_bias(u8 policy)
static void configure_mca(void)
{
msr_t msr;
- const unsigned int mcg_cap_msr = 0x179;
int i;
int num_banks;
- msr = rdmsr(mcg_cap_msr);
+ msr = rdmsr(IA32_MCG_CAP);
num_banks = msr.lo & 0xff;
msr.lo = msr.hi = 0;
/*
@@ -348,8 +351,13 @@ static void configure_mca(void)
* of these banks are core vs package scope. For now every CPU clears
* every bank.
*/
- for (i = 0; i < num_banks; i++)
+ for (i = 0; i < num_banks; i++) {
+ /* Clear the machine check status */
wrmsr(IA32_MC0_STATUS + (i * 4), msr);
+ /* Initialize machine checks */
+ wrmsr(IA32_MC0_CTL + i * 4,
+ (msr_t) {.lo = 0xffffffff, .hi = 0xffffffff});
+ }
}
/* All CPUs including BSP will run the following function. */
@@ -376,6 +384,9 @@ static void cpu_core_init(device_t cpu)
/* Enable Turbo */
enable_turbo();
+
+ /* Configure SGX */
+ configure_sgx(microcode_patch);
}
static struct device_operations cpu_dev_ops = {
@@ -399,10 +410,6 @@ static const struct cpu_driver driver __cpu_driver = {
.id_table = cpu_table,
};
-/* MP initialization support. */
-static const void *microcode_patch;
-static int ht_disabled;
-
static int get_cpu_count(void)
{
msr_t msr;
@@ -500,6 +507,14 @@ static void soc_init_cpus(void *unused)
/* Thermal throttle activation offset */
configure_thermal_target();
+
+ /*
+ * TODO: somehow calling configure_sgx() in cpu_core_init() is not
+ * successful on the BSP (other threads are fine). Have to run it again
+ * here to get SGX enabled on BSP. This behavior needs to be
+ * root-caused, and we shall not have this redundant call.
+ */
+ configure_sgx(microcode_patch);
}
/* Ensure to re-program all MTRRs based on DRAM resource settings */
@@ -511,16 +526,25 @@ static void soc_post_cpus_init(void *unused)
int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
- msr_t msr;
- /* If PRMRR/SGX is supported the FIT microcode load will set the msr
+ msr_t msr1;
+ msr_t msr2;
+
+ /*
+ * If PRMRR/SGX is supported the FIT microcode load will set the msr
* 0x08b with the Patch revision id one less than the id in the
* microcode binary. The PRMRR support is indicated in the MSR
- * MTRRCAP[12]. Check for this feature and avoid reloading the
- * same microcode during CPU initialization.
+ * MTRRCAP[12]. If SGX is not enabled, check and avoid reloading the
+ * same microcode during CPU initialization. If SGX is enabled, as
+ * part of SGX BIOS initialization steps, the same microcode needs to
+ * be reloaded after the core PRMRR MSRs are programmed.
*/
- msr = rdmsr(MTRR_CAP_MSR);
- return (msr.lo & PRMRR_SUPPORTED)
- && (current_patch_id == new_patch_id - 1);
+ msr1 = rdmsr(MTRR_CAP_MSR);
+ msr2 = rdmsr(PRMRR_PHYS_BASE_MSR);
+ if (msr2.lo && (current_patch_id == new_patch_id - 1))
+ return 0;
+ else
+ return (msr1.lo & PRMRR_SUPPORTED) &&
+ (current_patch_id == new_patch_id - 1);
}
/*
diff --git a/src/soc/intel/skylake/include/soc/cpu.h b/src/soc/intel/skylake/include/soc/cpu.h
index 33fb2f1b3a..a259a2bafb 100644
--- a/src/soc/intel/skylake/include/soc/cpu.h
+++ b/src/soc/intel/skylake/include/soc/cpu.h
@@ -68,5 +68,6 @@ u32 cpu_family_model(void);
u32 cpu_stepping(void);
int cpu_is_ult(void);
int is_secondary_thread(void);
+void configure_sgx(const void *microcode_patch);
#endif
diff --git a/src/soc/intel/skylake/include/soc/msr.h b/src/soc/intel/skylake/include/soc/msr.h
index a5724477cc..36eefdb7b0 100644
--- a/src/soc/intel/skylake/include/soc/msr.h
+++ b/src/soc/intel/skylake/include/soc/msr.h
@@ -20,6 +20,7 @@
#include <intelblocks/msr.h>
#define MSR_PIC_MSG_CONTROL 0x2e
+#define MSR_BIOS_UPGD_TRIG 0x7a
#define MSR_FLEX_RATIO 0x194
#define FLEX_RATIO_LOCK (1 << 20)
#define FLEX_RATIO_EN (1 << 16)
@@ -29,13 +30,16 @@
#define ENERGY_POLICY_NORMAL 6
#define ENERGY_POLICY_POWERSAVE 15
#define IA32_PACKAGE_THERM_INTERRUPT 0x1b2
-#define EMRR_PHYS_BASE_MSR 0x1f4
-#define EMRR_PHYS_MASK_MSR 0x1f5
+#define PRMRR_PHYS_BASE_MSR 0x1f4
+#define PRMRR_PHYS_MASK_MSR 0x1f5
+#define PRMRR_PHYS_MASK_LOCK (1 << 10)
+#define PRMRR_PHYS_MASK_VALID (1 << 11)
#define IA32_PLATFORM_DCA_CAP 0x1f8
#define MSR_LT_LOCK_MEMORY 0x2e7
#define UNCORE_PRMRR_PHYS_BASE_MSR 0x2f4
#define UNCORE_PRMRR_PHYS_MASK_MSR 0x2f5
-
+#define MSR_SGX_OWNEREPOCH0 0x300
+#define MSR_SGX_OWNEREPOCH1 0x301
#define MSR_VR_CURRENT_CONFIG 0x601
#define MSR_VR_MISC_CONFIG 0x603
#define MSR_VR_MISC_CONFIG2 0x636
diff --git a/src/soc/intel/skylake/sgx.c b/src/soc/intel/skylake/sgx.c
new file mode 100644
index 0000000000..0e887de2ed
--- /dev/null
+++ b/src/soc/intel/skylake/sgx.c
@@ -0,0 +1,161 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <console/console.h>
+#include <chip.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/intel/microcode.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+
+static int is_sgx_supported(void)
+{
+ struct cpuid_result cpuid_regs;
+ msr_t msr;
+
+ cpuid_regs = cpuid_ext(0x7, 0x0); /* EBX[2] is feature capability */
+ msr = rdmsr(MTRR_CAP_MSR); /* Bit 12 is PRMRR enablement */
+ return ((cpuid_regs.ebx & 0x4) && (msr.lo & PRMRR_SUPPORTED));
+}
+
+static int configure_core_prmrr(void)
+{
+ msr_t prmrr_base;
+ msr_t prmrr_mask;
+ msr_t msr;
+
+ /*
+ * PRMRR base and mask are read from the UNCORE PRMRR MSRs
+ * that are already set in FSP-M.
+ */
+ prmrr_base = rdmsr(UNCORE_PRMRR_PHYS_BASE_MSR);
+ prmrr_mask = rdmsr(UNCORE_PRMRR_PHYS_MASK_MSR);
+ if (!prmrr_base.lo) {
+ printk(BIOS_ERR, "SGX Error: Uncore PRMRR is not set!\n");
+ return -1;
+ }
+
+ msr = rdmsr(PRMRR_PHYS_MASK_MSR);
+ /* If it is locked don't attempt to write PRMRR MSRs. */
+ if (msr.lo & PRMRR_PHYS_MASK_LOCK)
+ return 0;
+
+ /* Program core PRMRR MSRs */
+ prmrr_base.lo |= 0x6; /* Set memory attribute to cache writeback */
+ wrmsr(PRMRR_PHYS_BASE_MSR, prmrr_base);
+ prmrr_mask.lo &= ~PRMRR_PHYS_MASK_VALID; /* Do not set the valid bit */
+ prmrr_mask.lo |= PRMRR_PHYS_MASK_LOCK; /* Lock it */
+ wrmsr(PRMRR_PHYS_MASK_MSR, prmrr_mask);
+ return 0;
+}
+
+static void enable_sgx(void)
+{
+ msr_t msr;
+
+ msr = rdmsr(IA32_FEATURE_CONTROL);
+ /* Only enable it when it is not locked */
+ if ((msr.lo & 1) == 0) {
+ msr.lo |= (1 << 18); /* Enable it */
+ wrmsr(IA32_FEATURE_CONTROL, msr);
+ }
+}
+
+static void lock_sgx(void)
+{
+ msr_t msr;
+
+ msr = rdmsr(IA32_FEATURE_CONTROL);
+ /* If it is locked don't attempt to lock it again. */
+ if ((msr.lo & 1) == 0) {
+ msr.lo |= 1; /* Lock it */
+ wrmsr(IA32_FEATURE_CONTROL, msr);
+ }
+}
+
+static int owner_epoch_update(void)
+{
+ /*
+ * TODO - the Owner Epoch update mechanism is not determined yet,
+ * for PoC just write '0's to the MSRs.
+ */
+ msr_t msr = {0, 0};
+
+ wrmsr(MSR_SGX_OWNEREPOCH0, msr);
+ wrmsr(MSR_SGX_OWNEREPOCH1, msr);
+ return 0;
+}
+
+static void activate_sgx(void)
+{
+ msr_t msr;
+
+ /*
+ * Activate SGX feature by writing 1b to MSR 0x7A on all threads.
+ * BIOS must ensure bit 0 is set prior to writing to it, then read it
+ * back and verify the bit is cleared to confirm SGX activation.
+ */
+ msr = rdmsr(MSR_BIOS_UPGD_TRIG);
+ if (msr.lo & 0x1) {
+ wrmsr(MSR_BIOS_UPGD_TRIG, (msr_t) {.lo = 0x1, .hi = 0});
+ /* Read back to verify it is activated */
+ msr = rdmsr(MSR_BIOS_UPGD_TRIG);
+ if (msr.lo & 0x1)
+ printk(BIOS_ERR, "SGX activation failed.\n");
+ else
+ printk(BIOS_INFO, "SGX activation was successful.\n");
+ } else {
+ printk(BIOS_ERR, "SGX feature is deactivated.\n");
+ }
+}
+
+void configure_sgx(const void *microcode_patch)
+{
+ device_t dev = SA_DEV_ROOT;
+ config_t *conf = dev->chip_info;
+ msr_t msr;
+
+ if (!conf->sgx_enable || !is_sgx_supported())
+ return;
+
+ /* Initialize PRMRR core MSRs */
+ if (configure_core_prmrr() < 0)
+ return;
+
+ /* Enable the SGX feature */
+ enable_sgx();
+
+ /* Update the owner epoch value */
+ if (owner_epoch_update() < 0)
+ return;
+
+ /* Ensure to lock memory before reload microcode patch */
+ msr = rdmsr(MSR_LT_LOCK_MEMORY);
+ if ((msr.lo & 1) == 0) {
+ msr.lo |= 1; /* Lock it */
+ wrmsr(MSR_LT_LOCK_MEMORY, msr);
+ }
+
+ /* Reload the microcode patch */
+ intel_microcode_load_unlocked(microcode_patch);
+
+ /* Lock the SGX feature */
+ lock_sgx();
+
+ /* Activate the SGX feature */
+ activate_sgx();
+}