[coreboot-gerrit] Patch set updated for coreboot: 27d953e x86: always mirror payload to ram before loading

Aaron Durbin (adurbin@google.com) gerrit at coreboot.org
Wed Feb 26 23:57:30 CET 2014


Aaron Durbin (adurbin at google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/5305

-gerrit

commit 27d953eafcd9c324346915d48544e83f21f452f4
Author: Aaron Durbin <adurbin at chromium.org>
Date:   Tue Feb 25 20:36:56 2014 -0600

    x86: always mirror payload to ram before loading
    
    Boot time can be reduced by mirroring the payload into
    main memory before doing the actual loading. The systems
    that benefit are typically Intel ones whose SPI flash is
    memory mapped. If the SPI region is not cached, every
    access to the payload during loading is an uncacheable
    access. Instead, take advantage of the on-board SPI
    controller, which has an internal cache and prefetcher,
    by copying 64-byte cachelines using 32-bit word copies.
    
    Change-Id: I4aac856b1b5130fa2d68a6c45a96cfeead472a52
    Signed-off-by: Aaron Durbin <adurbin at chromium.org>
---
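For reference, the "32-bit word copies" mentioned above boil down to
the access pattern sketched below. This is only an illustration, not
part of the change: the patch relies on the ramstage memcpy() emitting
32-bit moves, and mirror_cachelines() is a hypothetical helper name.

#include <stdint.h>
#include <stddef.h>

/*
 * Copy a region from memory-mapped SPI into RAM using sequential
 * 32-bit loads, so the SPI controller's prefetcher sees the
 * cacheline-aligned 32-bit accesses it looks for. The caller is
 * assumed to have already aligned src and size to 64 bytes.
 */
static void mirror_cachelines(void *dst, const void *src, size_t size)
{
	const uint32_t *s = src;
	uint32_t *d = dst;

	while (size >= sizeof(uint32_t)) {
		*d++ = *s++;
		size -= sizeof(uint32_t);
	}
}
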
 src/cpu/x86/Makefile.inc               |  1 +
 src/cpu/x86/mirror_payload.c           | 71 ++++++++++++++++++++++++++++++++++
 src/include/payload_loader.h           |  3 ++
 src/lib/loaders/load_and_run_payload.c |  7 ++++
 4 files changed, 82 insertions(+)

diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index d5bc2fd..c26edf6 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -3,6 +3,7 @@ romstage-$(CONFIG_HAVE_ACPI_RESUME) += car.c
 
 subdirs-$(CONFIG_PARALLEL_MP) += name
 ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
+ramstage-y += mirror_payload.c
 
 SIPI_ELF=$(obj)/cpu/x86/sipi_vector.elf
 SIPI_BIN=$(SIPI_ELF:.elf=)
diff --git a/src/cpu/x86/mirror_payload.c b/src/cpu/x86/mirror_payload.c
new file mode 100644
index 0000000..edd2641
--- /dev/null
+++ b/src/cpu/x86/mirror_payload.c
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <console/console.h>
+#include <bootmem.h>
+#include <payload_loader.h>
+
+void mirror_payload(struct payload *payload)
+{
+	char *buffer;
+	size_t size;
+	char *src;
+	uintptr_t alignment_diff;
+	const unsigned long cacheline_size = 64;
+	const uintptr_t intra_cacheline_mask = cacheline_size - 1;
+	const uintptr_t cacheline_mask = ~intra_cacheline_mask;
+
+	src = payload->backing_store.data;
+	size = payload->backing_store.size;
+
+	/*
+	 * Adjust size so that the start and end points are aligned to a
+	 * cacheline. The SPI hardware controllers on Intel machines should
+	 * cache full length cachelines as well as prefetch data.  Once the
+	 * data is mirrored in memory all accesses should hit the CPU's cache.
+	 */
+	alignment_diff = (intra_cacheline_mask & (uintptr_t)src);
+	size += alignment_diff;
+
+	size = ALIGN(size, cacheline_size);
+
+	printk(BIOS_DEBUG, "Payload aligned size: 0x%zx\n", size);
+
+	buffer = bootmem_allocate_buffer(size);
+
+	if (buffer == NULL) {
+		printk(BIOS_DEBUG, "No buffer for mirroring payload.\n");
+		return;
+	}
+
+	src = (void *)(cacheline_mask & (uintptr_t)src);
+
+	/*
+	 * Note that if memcpy is not using 32-bit moves the performance will
+	 * degrade because the SPI hardware prefetchers look for
+	 * cacheline-aligned 32-bit accesses to kick in.
+	 */
+	memcpy(buffer, src, size);
+
+	/* Update the payload's backing store. */
+	payload->backing_store.data = &buffer[alignment_diff];
+}
diff --git a/src/include/payload_loader.h b/src/include/payload_loader.h
index 7ef5806..7a3f045 100644
--- a/src/include/payload_loader.h
+++ b/src/include/payload_loader.h
@@ -44,6 +44,9 @@ struct payload *payload_load(void);
 /* Run the loaded payload. */
 void payload_run(const struct payload *payload);
 
+/* Mirror the payload to be loaded. */
+void mirror_payload(struct payload *payload);
+
 /* architecture specific function to run payload. */
 void arch_payload_run(const struct payload *payload);
 
diff --git a/src/lib/loaders/load_and_run_payload.c b/src/lib/loaders/load_and_run_payload.c
index 7e1383e..2204090 100644
--- a/src/lib/loaders/load_and_run_payload.c
+++ b/src/lib/loaders/load_and_run_payload.c
@@ -39,6 +39,11 @@ static struct payload global_payload = {
 	.name = CONFIG_CBFS_PREFIX "/payload",
 };
 
+void __attribute__((weak)) mirror_payload(struct payload *payload)
+{
+	return;
+}
+
 struct payload *payload_load(void)
 {
 	int i;
@@ -62,6 +67,8 @@ struct payload *payload_load(void)
 	if (i == ARRAY_SIZE(payload_ops))
 		return NULL;
 
+	mirror_payload(payload);
+
 	entry = selfload(payload);
 
 	if (entry == NULL)
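
The hook above is wired in with the usual weak/strong symbol pattern:
load_and_run_payload.c carries a no-op mirror_payload() marked
__attribute__((weak)), and builds that include src/cpu/x86/mirror_payload.c
(via the ramstage-y rule above) supply the strong definition the linker
prefers. A minimal standalone sketch of that pattern, using hypothetical
names (do_mirror, default.c, x86.c) rather than anything from the tree:

/* default.c -- fallback used when no strong definition is linked in. */
void __attribute__((weak)) do_mirror(void)
{
	/* nothing to do */
}

/* x86.c -- strong definition; the linker picks this over the weak one. */
void do_mirror(void)
{
	/* architecture-specific mirroring work */
}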


