author     Aaron Durbin <adurbin@chromium.org>       2014-02-25 20:36:56 -0600
committer  Aaron Durbin <adurbin@google.com>         2014-03-07 15:30:27 +0100
commit     c34713d33e088095acb6dd61527a26117d9c368e (patch)
tree       020c5387801605f753fca25cc6b2c09ff5763e30 /src/cpu/x86
parent     7274800ea37edf41cb50e899d03baa02bdeecade (diff)
x86: add MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING option
Boot speed can be improved by mirroring the payload into main memory before doing the actual loading. Systems that would benefit from this are typically Intel ones whose SPI flash is memory-mapped. Without the SPI region being cached, every access to the payload while it is being loaded is an uncacheable access. Instead, take advantage of the on-board SPI controller, which has an internal cache and prefetcher, by copying 64-byte cachelines using 32-bit word copies.

Change-Id: I4aac856b1b5130fa2d68a6c45a96cfeead472a52
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/5305
Tested-by: build bot (Jenkins)
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Reviewed-by: Vladimir Serbinenko <phcoder@gmail.com>
Reviewed-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
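For context, here is a minimal sketch (not part of the patch) of the kind of copy the message describes: moving the payload one 64-byte cacheline at a time as 32-bit words, so the SPI controller's prefetcher sees cacheline-aligned 32-bit reads. The function name is illustrative only; the patch itself relies on memcpy() emitting 32-bit moves, as its code comment notes.

#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch, assuming dst/src are cacheline-aligned and size is a
 * multiple of the 64-byte cacheline size. 64 bytes = 16 32-bit words. */
static void copy_cachelines_32bit(void *dst, const void *src, size_t size)
{
	uint32_t *d = dst;
	const uint32_t *s = src;
	size_t words = size / sizeof(uint32_t);

	/* Plain 32-bit word copies; the hardware prefetcher fills each
	 * cacheline once the first aligned 32-bit access touches it. */
	while (words--)
		*d++ = *s++;
}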
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/Kconfig           |  9
-rw-r--r--  src/cpu/x86/Makefile.inc      |  1
-rw-r--r--  src/cpu/x86/mirror_payload.c  | 71
3 files changed, 81 insertions, 0 deletions
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 19fa246fe2..b5bb7e62c2 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -121,3 +121,12 @@ config BACKUP_DEFAULT_SMM_REGION
help
The cpu support will select this option if the default SMM region
needs to be backed up for suspend/resume purposes.
+
+config MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING
+ def_bool n
+ help
+ On certain platforms a boot speed gain can be realized by mirroring
+ the payload data stored in non-volatile storage. On x86 systems the
+ payload would typically live in a memory-mapped SPI part. Copying
+ the SPI contents to RAM before performing the load can speed up
+ the boot process.
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index d5bc2fd219..277ba484a6 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -3,6 +3,7 @@ romstage-$(CONFIG_HAVE_ACPI_RESUME) += car.c
subdirs-$(CONFIG_PARALLEL_MP) += name
ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
+ramstage-$(CONFIG_MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING) += mirror_payload.c
SIPI_ELF=$(obj)/cpu/x86/sipi_vector.elf
SIPI_BIN=$(SIPI_ELF:.elf=)
diff --git a/src/cpu/x86/mirror_payload.c b/src/cpu/x86/mirror_payload.c
new file mode 100644
index 0000000000..edd26416f2
--- /dev/null
+++ b/src/cpu/x86/mirror_payload.c
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <console/console.h>
+#include <bootmem.h>
+#include <payload_loader.h>
+
+void mirror_payload(struct payload *payload)
+{
+ char *buffer;
+ size_t size;
+ char *src;
+ uintptr_t alignment_diff;
+ const unsigned long cacheline_size = 64;
+ const uintptr_t intra_cacheline_mask = cacheline_size - 1;
+ const uintptr_t cacheline_mask = ~intra_cacheline_mask;
+
+ src = payload->backing_store.data;
+ size = payload->backing_store.size;
+
+ /*
+ * Adjust size so that the start and end points are aligned to a
+ * cacheline. The SPI hardware controllers on Intel machines should
+ * cache full length cachelines as well as prefetch data. Once the
+ * data is mirrored in memory all accesses should hit the CPU's cache.
+ */
+ alignment_diff = (intra_cacheline_mask & (uintptr_t)src);
+ size += alignment_diff;
+
+ size = ALIGN(size, cacheline_size);
+
+ printk(BIOS_DEBUG, "Payload aligned size: 0x%zx\n", size);
+
+ buffer = bootmem_allocate_buffer(size);
+
+ if (buffer == NULL) {
+ printk(BIOS_DEBUG, "No buffer for mirroring payload.\n");
+ return;
+ }
+
+ src = (void *)(cacheline_mask & (uintptr_t)src);
+
+ /*
+ * Note that if memcpy is not using 32-bit moves the performance will
+ * degrade because the SPI hardware prefetchers look for
+ * cacheline-aligned 32-bit accesses to kick in.
+ */
+ memcpy(buffer, src, size);
+
+ /* Update the payload's backing store. */
+ payload->backing_store.data = &buffer[alignment_diff];
+}
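
For reference, a hypothetical call site is sketched below. mirror_payload() and the Kconfig symbol come from this patch; the surrounding loader function and the IS_ENABLED() guard are assumptions about how a payload loader could use it. Note that on allocation failure mirror_payload() returns without touching backing_store, so a caller can simply continue loading from the SPI mapping.

#include <payload_loader.h>

/* Hypothetical caller sketch, not part of this commit. */
static void load_payload(struct payload *payload)
{
	/* Mirror the SPI-mapped payload into cacheable RAM first, if the
	 * platform enabled the option. */
	if (IS_ENABLED(CONFIG_MIRROR_PAYLOAD_TO_RAM_BEFORE_LOADING))
		mirror_payload(payload);

	/* ... continue loading segments from payload->backing_store ... */
}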