author    Martin Roth <martin@coreboot.org>    2021-10-01 14:28:22 -0600
committer Martin Roth <martinroth@google.com>  2021-10-05 18:06:39 +0000
commit    0949e739066c3509e05db2b9ed71cefaaa62205f (patch)
tree      797d772f524dd668689f8c2813f3b052e84de434 /src/cpu/x86
parent    6c3ece9c9ef73db5c0e02cc5a41c98f46b86c3e9 (diff)
src/acpi to src/lib: Fix spelling errors
These issues were found and fixed by codespell, a useful tool for finding spelling errors.

Signed-off-by: Martin Roth <martin@coreboot.org>
Change-Id: I5b8ecdfe75d99028fee820a2034466a8ad1c5e63
Reviewed-on: https://review.coreboot.org/c/coreboot/+/58080
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/64bit/exit32.inc  4
-rw-r--r--  src/cpu/x86/pae/pgtbl.c       4
-rw-r--r--  src/cpu/x86/sipi_vector.S     2
3 files changed, 5 insertions, 5 deletions
diff --git a/src/cpu/x86/64bit/exit32.inc b/src/cpu/x86/64bit/exit32.inc
index 91cccb535e..4d1149ee6c 100644
--- a/src/cpu/x86/64bit/exit32.inc
+++ b/src/cpu/x86/64bit/exit32.inc
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * For droping from long mode to protected mode.
+ * For dropping from long mode to protected mode.
*
* For reference see "AMD64 ArchitectureProgrammer's Manual Volume 2",
* Document 24593-Rev. 3.31-July 2019 Chapter 5.3
@@ -47,7 +47,7 @@ SetCodeSelector32:
# use iret to jump to a 32-bit offset in a new code segment
# iret will pop cs:rip, flags, then ss:rsp
- mov %ss, %ax # need to push ss, but push ss instuction
+ mov %ss, %ax # need to push ss, but push ss instruction
push %rax # not valid in x64 mode, so use ax
push %rdx # the rsp to load
pushfq # push rflags
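The comment in the hunk above notes that iretq is used to reach a 32-bit offset in a new code segment, and that iret pops cs:rip, flags, then ss:rsp. For reference only (not part of this patch), the five-quadword frame that iretq consumes in long mode can be pictured as a C struct laid out from the top of the stack downward; the field names are descriptive and not taken from coreboot sources.

#include <stdint.h>

/* Frame popped by iretq in long mode, top of stack first. */
struct iretq_frame {
	uint64_t rip;    /* new instruction pointer (popped first) */
	uint64_t cs;     /* new code segment selector, e.g. a 32-bit segment */
	uint64_t rflags; /* restored flags */
	uint64_t rsp;    /* new stack pointer */
	uint64_t ss;     /* new stack segment selector (popped last) */
};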
diff --git a/src/cpu/x86/pae/pgtbl.c b/src/cpu/x86/pae/pgtbl.c
index 814dbf5c70..c8783d6234 100644
--- a/src/cpu/x86/pae/pgtbl.c
+++ b/src/cpu/x86/pae/pgtbl.c
@@ -104,7 +104,7 @@ void paging_disable_pae(void)
* Use PAE to map a page and then memset it with the pattern specified.
* In order to use PAE pagetables for virtual addressing are set up and reloaded
* on a 2MiB boundary. After the function is done, virtual addressing mode is
- * disabled again. The PAT are set to all cachable, but MTRRs still apply.
+ * disabled again. The PAT are set to all cacheable, but MTRRs still apply.
*
* Requires a scratch memory for pagetables and a virtual address for
* non identity mapped memory.
@@ -124,7 +124,7 @@ void paging_disable_pae(void)
* Content at physical address isn't preserved.
* @param length The length of the memory segment to memset
* @param dest Physical memory address to memset
- * @param pat The pattern to write to the pyhsical memory
+ * @param pat The pattern to write to the physical memory
* @return 0 on success, 1 on error
*/
int memset_pae(uint64_t dest, unsigned char pat, uint64_t length, void *pgtbl,
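The doc comment above describes the memset_pae() contract, but the diff context cuts off before the full parameter list. Below is a minimal, assumption-laden caller sketch, not code from the patch: the trailing virtual-address parameter, the header path, the scratch-buffer size, and every address used are hypothetical, chosen only to match the stated requirements (scratch memory for the pagetables and a free virtual window for the non-identity mapping).

#include <stdint.h>
#include <cpu/x86/pae.h>	/* assumed header declaring memset_pae() */

/* Assumed to be large and aligned enough for the temporary pagetables. */
static uint8_t pgtbl_scratch[20 * 1024] __attribute__((aligned(4096)));

/* Zero 16 MiB of physical memory above 4 GiB, which a 32-bit identity map
 * cannot reach directly. Returns 0 on success, 1 on error. */
static int wipe_high_memory(void)
{
	const uint64_t dest = 0x100000000ULL;         /* physical destination */
	const uint64_t length = 16ULL * 1024 * 1024;  /* 16 MiB */
	void *vmem_addr = (void *)0x80000000;         /* assumed free 2 MiB-aligned virtual window */

	return memset_pae(dest, 0, length, pgtbl_scratch, vmem_addr);
}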
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index 44b772bcc2..496fd345eb 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -57,7 +57,7 @@ _start:
movw %cs, %ax
movw %ax, %ds
- /* The gdtaddr needs to be releative to the data segment in order
+ /* The gdtaddr needs to be relative to the data segment in order
* to properly dereference it. The .text section comes first in an
* rmodule so _start can be used as a proxy for the load address. */
movl $(gdtaddr), %ebx
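The comment in this last hunk explains why gdtaddr must be made relative to the data segment and why _start can stand in for the load address. A hedged C restatement of that arithmetic, purely illustrative and using hypothetical names, is:

#include <stdint.h>

/* Because .text comes first in an rmodule, the link-time distance from
 * _start to gdtaddr equals gdtaddr's offset from the runtime load address,
 * which is the data-segment-relative value needed to dereference it. */
static uintptr_t gdt_segment_offset(uintptr_t gdtaddr_linktime, uintptr_t start_linktime)
{
	return gdtaddr_linktime - start_linktime;
}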