Diffstat (limited to 'src/mainboard/emulation/qemu-q35/smihandler.c')
-rw-r--r-- | src/mainboard/emulation/qemu-q35/smihandler.c | 317 |
1 file changed, 317 insertions, 0 deletions
diff --git a/src/mainboard/emulation/qemu-q35/smihandler.c b/src/mainboard/emulation/qemu-q35/smihandler.c
new file mode 100644
index 0000000000..a0417b5305
--- /dev/null
+++ b/src/mainboard/emulation/qemu-q35/smihandler.c
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <cpu/amd/amd64_save_state.h>
+#include <cpu/x86/legacy_save_state.h>
+#include <cpu/x86/save_state.h>
+#include <cpu/x86/smm.h>
+#include <elog.h>
+#include <smmstore.h>
+#include <southbridge/intel/common/pmbase.h>
+#include <southbridge/intel/common/pmutil.h>
+#include <string.h>
+
+/*
+ * SMM in QEMU is unlike that on real hardware. Most notable differences:
+ *
+ * - Revision ID is either 0x20000 or 0x20064, depending on whether
+ *   qemu-system-i386 or qemu-system-x86_64 is being used.
+ * - SMI_STS is always 0.
+ * - Since I/O Instruction Restart bit in revision ID field is not set, none of
+ *   the fields related to I/O instructions is set in saved state. It is
+ *   impossible to check if SMI was generated by write to APMC port that way.
+ * - On older versions of QEMU, SMI isn't immediately emulated when Tiny Code
+ *   Generator (TCG) accelerator is used, due to how Translation Blocks (TBs)
+ *   are built. This means that contents of registers may change before SMI
+ *   handler gets invoked. This can be worked around on the caller side by
+ *   either writing to APMC port from a non-inlined function that doesn't
+ *   return a result (so RAX isn't overwritten), or by following write to port
+ *   with an instruction that forces generation of a new TB, e.g. 'pause'. In
+ *   both cases, RIP in the save state will not point to the instruction
+ *   directly following 'out'. When KVM is used, or in newer QEMU (8.2.2 is
+ *   known to work) SMIs are injected immediately and RIP represents next
+ *   instruction after `out`.
+ */
+
+static const uint32_t amd64_revisions[] = {
+	0x00020064,
+	SMM_REV_INVALID,
+};
+
+static int amd64_get_reg(const enum cpu_reg reg, const int node, void *out,
+			 const uint8_t length)
+{
+	amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);
+
+	if (length != 1 && length != 2 && length != 4 && length != 8)
+		return -1;
+
+	switch (reg) {
+	case RAX:
+		memcpy(out, &save_state->rax, length);
+		return 0;
+	case RCX:
+		memcpy(out, &save_state->rcx, length);
+		return 0;
+	case RDX:
+		memcpy(out, &save_state->rdx, length);
+		return 0;
+	case RBX:
+		memcpy(out, &save_state->rbx, length);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int amd64_set_reg(const enum cpu_reg reg, const int node, void *in,
+			 const uint8_t length)
+{
+	amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);
+
+	if (length != 1 && length != 2 && length != 4 && length != 8)
+		return -1;
+
+	switch (reg) {
+	case RAX:
+		save_state->rax = 0;
+		memcpy(&save_state->rax, in, length);
+		return 0;
+	case RCX:
+		save_state->rcx = 0;
+		memcpy(&save_state->rcx, in, length);
+		return 0;
+	case RDX:
+		save_state->rdx = 0;
+		memcpy(&save_state->rdx, in, length);
+		return 0;
+	case RBX:
+		save_state->rbx = 0;
+		memcpy(&save_state->rbx, in, length);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int amd64_apmc_node(u8 cmd)
+{
+	amd64_smm_state_save_area_t *save_state;
+	int node;
+
+	for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
+		save_state = smm_get_save_state(node);
+
+		if (!save_state)
+			continue;
+
+		/*
+		 * Since fields related to I/O instructions are not filled in, check
+		 * RAX against command number only. There is 1/256 probability of false
+		 * positive.
+		 *
+		 * The alternative would be to:
+		 * - parse saved CR0 and EFER to discover host's execution mode
+		 * - parse saved CR3 and host page tables to obtain physical address
+		 *   corresponding to RIP
+		 * - map that page (or multiple, potentially nonconsecutive pages) that
+		 *   cover the code
+		 * - analyze the code and saved state against one of the `out`
+		 *   instructions to APM_CNT port
+		 * - ideally do so in constant-time manner to not leak information
+		 */
+		if ((save_state->rax & 0xFF) == cmd)
+			return node;
+	}
+
+	return -1;
+}
+
+static const struct smm_save_state_ops _amd64_ops = {
+	.revision_table = amd64_revisions,
+	.get_reg = amd64_get_reg,
+	.set_reg = amd64_set_reg,
+	.apmc_node = amd64_apmc_node,
+};
+
+const struct smm_save_state_ops *amd64_ops = &_amd64_ops;
+
+static const uint32_t legacy_revisions[] = {
+	0x00020000,
+	SMM_REV_INVALID,
+};
+
+static int legacy_get_reg(const enum cpu_reg reg, const int node, void *out,
+			  const uint8_t length)
+{
+	legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);
+
+	if (length != 1 && length != 2 && length != 4)
+		return -1;
+
+	switch (reg) {
+	case RAX:
+		memcpy(out, &save_state->eax, length);
+		return 0;
+	case RCX:
+		memcpy(out, &save_state->ecx, length);
+		return 0;
+	case RDX:
+		memcpy(out, &save_state->edx, length);
+		return 0;
+	case RBX:
+		memcpy(out, &save_state->ebx, length);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int legacy_set_reg(const enum cpu_reg reg, const int node, void *in,
+			  const uint8_t length)
+{
+	legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);
+
+	if (length != 1 && length != 2 && length != 4)
+		return -1;
+
+	switch (reg) {
+	case RAX:
+		save_state->eax = 0;
+		memcpy(&save_state->eax, in, length);
+		return 0;
+	case RCX:
+		save_state->ecx = 0;
+		memcpy(&save_state->ecx, in, length);
+		return 0;
+	case RDX:
+		save_state->edx = 0;
+		memcpy(&save_state->edx, in, length);
+		return 0;
+	case RBX:
+		save_state->ebx = 0;
+		memcpy(&save_state->ebx, in, length);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int legacy_apmc_node(u8 cmd)
+{
+	legacy_smm_state_save_area_t *save_state;
+	int node;
+
+	for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
+		save_state = smm_get_save_state(node);
+
+		if (!save_state)
+			continue;
+
+		/*
+		 * Since fields related to I/O instructions are not filled in, check
+		 * EAX against command number only. There is 1/256 probability of false
+		 * positive.
+		 *
+		 * See comment in amd64_apmc_node().
+		 */
+		if ((save_state->eax & 0xFF) == cmd)
+			return node;
+	}
+
+	return -1;
+}
+
+static const struct smm_save_state_ops _legacy_ops = {
+	.revision_table = legacy_revisions,
+	.get_reg = legacy_get_reg,
+	.set_reg = legacy_set_reg,
+	.apmc_node = legacy_apmc_node,
+};
+
+const struct smm_save_state_ops *legacy_ops = &_legacy_ops;
+
+static void mainboard_smi_gsmi(void)
+{
+	u32 ret;
+	u8 sub_command;
+	uintptr_t param;
+	int node = get_apmc_node(APM_CNT_ELOG_GSMI);
+
+	if (node < 0)
+		return;
+
+	/* Command and return value in EAX */
+	if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
+		return;
+
+	sub_command = (u8)(ret >> 8);
+
+	/* Parameter buffer in EBX */
+	if (get_save_state_reg(RBX, node, &param, sizeof(param)))
+		return;
+
+	/* drivers/elog/gsmi.c */
+	ret = gsmi_exec(sub_command, (u32 *)param);
+
+	set_save_state_reg(RAX, node, &ret, sizeof(ret));
+}
+
+static void mainboard_smi_store(void)
+{
+	u32 ret;
+	u8 sub_command;
+	uintptr_t reg_rbx;
+	int node = get_apmc_node(APM_CNT_SMMSTORE);
+
+	if (node < 0)
+		return;
+
+	/* Command and return value in EAX */
+	if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
+		return;
+
+	sub_command = (u8)(ret >> 8);
+
+	/* Parameter buffer in EBX */
+	if (get_save_state_reg(RBX, node, &reg_rbx, sizeof(reg_rbx)))
+		return;
+
+	/* drivers/smmstore/smi.c */
+	ret = smmstore_exec(sub_command, (void *)reg_rbx);
+
+	set_save_state_reg(RAX, node, &ret, sizeof(ret));
+}
+
+static int mainboard_finalized = 0;
+
+void cpu_smi_handler(void)
+{
+	u8 reg8;
+
+	reg8 = apm_get_apmc();
+	switch (reg8) {
+	case APM_CNT_ACPI_DISABLE:
+		write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) & ~SCI_EN);
+		break;
+	case APM_CNT_ACPI_ENABLE:
+		write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) | SCI_EN);
+		break;
+	case APM_CNT_FINALIZE:
+		if (mainboard_finalized) {
+			printk(BIOS_DEBUG, "SMI#: Already finalized\n");
+			return;
+		}
+
+		southbridge_finalize_all();
+		mainboard_finalized = 1;
+		break;
+	case APM_CNT_ELOG_GSMI:
+		if (CONFIG(ELOG_GSMI))
+			mainboard_smi_gsmi();
+		break;
+	case APM_CNT_SMMSTORE:
+		if (CONFIG(SMMSTORE))
+			mainboard_smi_store();
+		break;
+	}
+}
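The header comment above describes a caller-side workaround for delayed SMI delivery under TCG: issue the APMC write from a non-inlined function, or follow the 'out' with an instruction that forces a new translation block. Below is a minimal sketch of such a helper; it is not part of this commit, and it assumes coreboot's outb() from <arch/io.h>, the u8 type from <types.h>, and the APM_CNT port definition from <cpu/x86/smm.h>.

#include <arch/io.h>
#include <cpu/x86/smm.h>
#include <types.h>

/* Write an APMC command so the SMI is taken before the caller's registers
 * can change again: keep the call out of line and force a new TB with
 * 'pause' right after the port write, as the header comment suggests. */
static __attribute__((noinline)) void apmc_write(u8 cmd)
{
	outb(cmd, APM_CNT);
	asm volatile ("pause" ::: "memory");
}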
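mainboard_smi_gsmi() and mainboard_smi_store() expect the APMC command in the low byte of EAX, the sub-command in bits 15:8, and a pointer to the parameter buffer in EBX, and they place their result back into EAX of the save state. The following is a hedged caller-side sketch, not part of this diff: the register layout mirrors what the handlers read back, while APM_CNT_SMMSTORE and the contents of the parameter buffer come from coreboot's SMMSTORE protocol and are assumed here.

/* Invoke the SMMSTORE handler: sub-command in AH, command in AL, parameter
 * buffer in EBX. The handler's return value appears in EAX once the SMI has
 * been serviced; the trailing 'pause' forces that to happen under TCG. */
static u32 smmstore_call(u8 sub_command, void *param)
{
	u32 eax = (sub_command << 8) | APM_CNT_SMMSTORE;

	asm volatile ("outb %%al, %%dx\n\t"
		      "pause"
		      : "+a" (eax)
		      : "d" (APM_CNT), "b" (param)
		      : "memory");

	return eax;
}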