Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2797
-gerrit
commit ed57d885dfc5660fdf79c780b3d390c5ed9f2e75
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Tue Feb 12 00:40:30 2013 -0600
ramstage: Add cbmem_get_table_location()
When CONFIG_EARLY_CBMEM_INIT is selected, romstage is supposed to have
initialized cbmem already. Therefore, provide a weak function named
cbmem_get_table_location() for the chipset to implement. When
CONFIG_EARLY_CBMEM_INIT is selected, ramstage calls
cbmem_get_table_location() to obtain the cbmem location and size and
then calls cbmem_initialize().
Change-Id: Idc45a95f9d4b1d83eb3c6d4977f7a8c80c1ffe76
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/include/cbmem.h | 5 +++++
src/lib/hardwaremain.c | 10 ++++++++++
2 files changed, 15 insertions(+)
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index d0f0c9a..1212cb2 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -67,6 +67,11 @@
#ifndef __ASSEMBLER__
#ifndef __PRE_RAM__
extern uint64_t high_tables_base, high_tables_size;
+#if CONFIG_EARLY_CBMEM_INIT
+/* Return 0 on success, < 0 on error. */
+int __attribute__((weak)) cbmem_get_table_location(uint64_t *tables_base,
+ uint64_t *tables_size);
+#endif
#endif
int cbmem_initialize(void);
diff --git a/src/lib/hardwaremain.c b/src/lib/hardwaremain.c
index d2213d2..b29cc93 100644
--- a/src/lib/hardwaremain.c
+++ b/src/lib/hardwaremain.c
@@ -85,6 +85,16 @@ void hardwaremain(int boot_complete)
/* FIXME: Is there a better way to handle this? */
init_timer();
+ /* CONFIG_EARLY_CBMEM_INIT indicates that romstage initialized
+ * the cbmem area. Therefore the table location can be initialized
+ * early in ramstage if cbmem_get_table_location() is implemented.
+ */
+#if CONFIG_EARLY_CBMEM_INIT
+ if (cbmem_get_table_location != NULL &&
+ !cbmem_get_table_location(&high_tables_base, &high_tables_size))
+ cbmem_initialize();
+#endif
+
timestamp_stash(TS_DEVICE_ENUMERATE);
/* Initialize chips early, they might disable unused devices. */
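For illustration, a chipset-side definition of the new hook could look like the minimal sketch below; get_top_of_ram() is an assumed chipset-specific helper and the HIGH_MEMORY_SIZE placement is only an example, neither is mandated by this patch.

#include <stdint.h>
#include <cbmem.h>

uint64_t get_top_of_ram(void); /* assumed chipset-specific query */

/* Without a definition like this, the weak declaration resolves to NULL and
 * ramstage skips the early cbmem setup above. Return 0 on success, < 0 on
 * error, matching the contract in cbmem.h. */
int cbmem_get_table_location(uint64_t *tables_base, uint64_t *tables_size)
{
        uint64_t top_of_ram = get_top_of_ram();

        if (!top_of_ram)
                return -1;

        *tables_size = HIGH_MEMORY_SIZE;
        *tables_base = top_of_ram - HIGH_MEMORY_SIZE;
        return 0;
}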
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2796
-gerrit
commit f8935ebff61c943162eb296514dd150a6a0e6884
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Mon Feb 11 21:15:12 2013 -0600
romstage_handoff: add s3_resume field
Provide a field in the romstage_handoff structure to indicate whether
the current boot is an ACPI S3 wake boot. There are currently quite a
few non-standardized ways of passing this knowledge from romstage to
ramstage; many involve stashing magic numbers in device-specific
registers. The addition of this field provides a more formal method of
passing this information along. However, it still requires the romstage
chipset code to initialize the field; in short, this change does not
make that a hard requirement for ramstage.
Change-Id: Ia819c0ceed89ed427ef576a036fa870eb7cf57bc
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/include/romstage_handoff.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/include/romstage_handoff.h b/src/include/romstage_handoff.h
index 13dc979..c20b261 100644
--- a/src/include/romstage_handoff.h
+++ b/src/include/romstage_handoff.h
@@ -31,6 +31,8 @@ struct romstage_handoff {
/* This indicates to the ramstage to reserve a chunk of memory. */
uint32_t reserve_base;
uint32_t reserve_size;
+ /* Indicate if the current boot is an S3 resume. */
+ uint32_t s3_resume;
};
#if defined(__PRE_RAM__)
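A sketch of how the field might be used is below; the wrapper names are invented for the example, romstage_handoff_find_or_add() is the helper introduced in the next change below (review 2795), and only the s3_resume field and the CBMEM_ID_ROMSTAGE_INFO id come from these patches.

#include <cbmem.h>
#include <romstage_handoff.h>

/* romstage (chipset) side: record the wake type once cbmem is usable. */
static void store_s3_state(int s3_wake)
{
        struct romstage_handoff *handoff = romstage_handoff_find_or_add();

        if (handoff != NULL)
                handoff->s3_resume = s3_wake ? 1 : 0;
}

/* ramstage (consumer) side: read it back, no scratch registers involved. */
static int romstage_reported_s3_wake(void)
{
        struct romstage_handoff *handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);

        return handoff != NULL && handoff->s3_resume != 0;
}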
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2795
-gerrit
commit b6ce7ac7af4a4058b36d5afdacf88f4552257aab
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Mon Feb 11 21:07:18 2013 -0600
romstage_handoff: provide common logic for setup
The romstage_handoff structure can be used from different components of
the romstage -- some in the chipset code, some in coreboot's core
library. To ensure that all users handle initialization of a newly
added romstage_handoff structure properly, provide a common function
that handles the structure's initialization.
Change-Id: I3998c6bb228255f4fd93d27812cf749560b06e61
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/include/romstage_handoff.h | 27 +++++++++++++++++++++++++++
src/lib/cbfs.c | 2 +-
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/src/include/romstage_handoff.h b/src/include/romstage_handoff.h
index 0cadfb5..13dc979 100644
--- a/src/include/romstage_handoff.h
+++ b/src/include/romstage_handoff.h
@@ -20,6 +20,8 @@
#define ROMSTAGE_HANDOFF_H
#include <stdint.h>
+#include <string.h>
+#include <cbmem.h>
/* It is the chipset's responsibility for maintaining the integrity of this
* structure in CBMEM. For instance, if chipset code adds this structure
@@ -31,5 +33,30 @@ struct romstage_handoff {
uint32_t reserve_size;
};
+#if defined(__PRE_RAM__)
+/* The romstage_handoff_find_or_add() function provides the necessary logic
+ * for initializing the romstage_handoff structure in cbmem. Different components
+ * of the romstage may be responsible for setting up different fields. Therefore
+ * that same logic flow should be used for allocating and initializing the
+ * structure. A newly allocated structure will be memset to 0. */
+static inline struct romstage_handoff *romstage_handoff_find_or_add(void)
+{
+ struct romstage_handoff *handoff;
+
+ /* cbmem_add() first does a find and uses the old location before the
+ * real add. However, it is important to know when the structure is not
+ * found so it can be initialized to 0. */
+ handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);
+
+ if (handoff == NULL) {
+ handoff = cbmem_add(CBMEM_ID_ROMSTAGE_INFO, sizeof(*handoff));
+ if (handoff != NULL)
+ memset(handoff, 0, sizeof(*handoff));
+ }
+
+ return handoff;
+}
+#endif
+
#endif /* ROMSTAGE_HANDOFF_H */
diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c
index 7d81038..c8bfb0c 100644
--- a/src/lib/cbfs.c
+++ b/src/lib/cbfs.c
@@ -161,7 +161,7 @@ void * cbfs_load_stage(struct cbfs_media *media, const char *name)
if (rmodule_load_no_clear_bss(ramstage_loc, &ramstage))
return (void *) -1;
- handoff = cbmem_add(CBMEM_ID_ROMSTAGE_INFO, sizeof(*handoff));
+ handoff = romstage_handoff_find_or_add();
if (handoff) {
handoff->reserve_base = (uint32_t)ramstage_base;
handoff->reserve_size = (uint32_t)cbmem_base -
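To illustrate why finding before adding matters, here is a sketch of two independent romstage components sharing the structure; the function names are invented for the example.

#include <romstage_handoff.h>

/* Component A (e.g. the stage loader) records the region to reserve. */
static void record_ramstage_region(uint32_t base, uint32_t size)
{
        struct romstage_handoff *handoff = romstage_handoff_find_or_add();

        if (handoff != NULL) {
                handoff->reserve_base = base;
                handoff->reserve_size = size;
        }
}

/* Component B (e.g. chipset code) later records the wake type. Only the
 * first caller allocates and zeroes the structure; the second call finds
 * the existing entry, so the fields set by component A are preserved. */
static void record_s3_wake(uint32_t s3_wake)
{
        struct romstage_handoff *handoff = romstage_handoff_find_or_add();

        if (handoff != NULL)
                handoff->s3_resume = s3_wake;
}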
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2794
-gerrit
commit 27036af262fcd967479baa8cdb1223cf17cbd36f
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Feb 8 22:31:33 2013 -0600
x86: protect against ABI assumptions from compiler
Some of the functions called from assembly assume the standard x86
32-bit ABI of passing all arguments on the stack. However, that calling
convention can be changed by compiler flags. To protect against the
current implicit reliance on it, annotate the functions called from
assembly with the cdecl function attribute, which tells the compiler to
use the stack-based parameter-passing convention.
Change-Id: I83625e1f92c6821a664b191b6ce1250977cf037a
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/intel/haswell/mp_init.c | 3 ++-
src/cpu/intel/haswell/smmrelocate.c | 6 +++---
src/include/cpu/x86/smm.h | 2 +-
3 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/cpu/intel/haswell/mp_init.c b/src/cpu/intel/haswell/mp_init.c
index 47683fb..7f15c39 100644
--- a/src/cpu/intel/haswell/mp_init.c
+++ b/src/cpu/intel/haswell/mp_init.c
@@ -149,7 +149,8 @@ static void cleanup_rom_caching(void)
/* By the time APs call ap_init() caching has been setup, and microcode has
* been loaded. */
-static void ap_init(unsigned int cpu, void *microcode_ptr)
+static void __attribute__((cdecl))
+ap_init(unsigned int cpu, void *microcode_ptr)
{
struct cpu_info *info;
diff --git a/src/cpu/intel/haswell/smmrelocate.c b/src/cpu/intel/haswell/smmrelocate.c
index 4312d79..2bf304e 100644
--- a/src/cpu/intel/haswell/smmrelocate.c
+++ b/src/cpu/intel/haswell/smmrelocate.c
@@ -85,8 +85,8 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
/* The relocation work is actually performed in SMM context, but the code
* resides in the ramstage module. This occurs by trampolining from the default
* SMRAM entry point to here. */
-static void cpu_smm_do_relocation(void *arg, int cpu,
- const struct smm_runtime *runtime)
+static void __attribute__((cdecl))
+cpu_smm_do_relocation(void *arg, int cpu, const struct smm_runtime *runtime)
{
em64t101_smm_state_save_area_t *save_state;
msr_t mtrr_cap;
@@ -209,7 +209,7 @@ static int install_relocation_handler(int num_cpus,
.num_concurrent_stacks = num_cpus,
.per_cpu_save_state_size = save_state_size,
.num_concurrent_save_states = 1,
- .handler = &cpu_smm_do_relocation,
+ .handler = (smm_handler_t)&cpu_smm_do_relocation,
.handler_arg = (void *)relo_params,
};
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index b6a6c4e..62494a9 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -429,7 +429,7 @@ struct smm_runtime {
u8 apic_id_to_cpu[CONFIG_MAX_CPUS];
} __attribute__ ((packed));
-typedef void (*smm_handler_t)(void *arg, int cpu,
+typedef void __attribute__((cdecl)) (*smm_handler_t)(void *arg, int cpu,
const struct smm_runtime *runtime);
#ifdef __SMM__
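A standalone sketch of the pattern is below; the names and the -mregparm=3 example flag are illustrative and not taken from the patch.

/* With e.g. -mregparm=3 the default convention passes the first arguments
 * in registers. The attribute pins this function, and the matching
 * function-pointer type, to the stack-based ABI that a hand-written
 * assembly stub expects when it pushes arg, then cpu, then issues a call. */
typedef void __attribute__((cdecl)) (*asm_callback_t)(unsigned int cpu,
                                                      void *arg);

static void __attribute__((cdecl)) ap_entry(unsigned int cpu, void *arg)
{
        (void)cpu;      /* both values arrive on the stack, as the stub assumes */
        (void)arg;
}

/* Because both carry the attribute, the assignment needs no cast. */
static asm_callback_t asm_callback = ap_entry;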
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2793
-gerrit
commit 1e0a9e17baf016ce1ef99d8460ebdf928c22db82
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Feb 8 17:38:35 2013 -0600
haswell: support for CONFIG_RELOCATABLE_RAMSTAGE
Now that CONFIG_RELOCATABLE_RAMSTAGE is available, support it on
Haswell-based systems. This patch comprises the following changes:
1. Ensure that memory is not preserved when a relocatable ramstage is
enabled; there is no need to.
2. Pick the proper stack to use after cache-as-ram is torn down. When
the ramstage is relocatable, finding a stack to use before vectoring
into ramstage is impossible, since the ramstage is a black box with an
unknown layout.
Change-Id: I2a07a497f52375569bae9c994432a8e7e7a40224
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/intel/haswell/romstage.c | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/src/cpu/intel/haswell/romstage.c b/src/cpu/intel/haswell/romstage.c
index d62377e..c4a4e8a 100644
--- a/src/cpu/intel/haswell/romstage.c
+++ b/src/cpu/intel/haswell/romstage.c
@@ -60,6 +60,19 @@ static inline u32 *stack_push(u32 *stack, u32 value)
return stack;
}
+static unsigned long choose_top_of_stack(void)
+{
+ unsigned long stack_top;
+#if CONFIG_RELOCATABLE_RAMSTAGE
+ stack_top = (unsigned long)cbmem_add(CBMEM_ID_RESUME_SCRATCH,
+ CONFIG_HIGH_SCRATCH_MEMORY_SIZE);
+ stack_top += CONFIG_HIGH_SCRATCH_MEMORY_SIZE;
+#else
+ stack_top = ROMSTAGE_STACK;
+#endif
+ return stack_top;
+}
+
/* setup_romstage_stack_after_car() determines the stack to use after
* cache-as-ram is torn down as well as the MTRR settings to use. */
static void *setup_romstage_stack_after_car(void)
@@ -70,7 +83,7 @@ static void *setup_romstage_stack_after_car(void)
u32 mtrr_mask_upper;
/* Top of stack needs to be aligned to a 4-byte boundary. */
- top_of_stack = ROMSTAGE_STACK & ~3;
+ top_of_stack = choose_top_of_stack() & ~3;
slot = (void *)top_of_stack;
num_mtrrs = 0;
@@ -245,11 +258,13 @@ void romstage_common(const struct romstage_params *params)
*(u32 *)CBMEM_RESUME_BACKUP = 0;
if ((boot_mode == 2) && cbmem_was_initted) {
+ #if !CONFIG_RELOCATABLE_RAMSTAGE
void *resume_backup_memory = cbmem_find(CBMEM_ID_RESUME);
if (resume_backup_memory) {
*(u32 *)CBMEM_BOOT_MODE = boot_mode;
*(u32 *)CBMEM_RESUME_BACKUP = (u32)resume_backup_memory;
}
+ #endif
/* Magic for S3 resume */
pci_write_config32(PCI_DEV(0, 0x00, 0), SKPAD, 0xcafed00d);
} else if (boot_mode == 2) {
@@ -277,6 +292,8 @@ void romstage_common(const struct romstage_params *params)
static inline void prepare_for_resume(void)
{
+/* Only need to save memory when ramstage isn't relocatable. */
+#if !CONFIG_RELOCATABLE_RAMSTAGE
#if CONFIG_HAVE_ACPI_RESUME
/* Back up the OS-controlled memory where ramstage will be loaded. */
if (*(u32 *)CBMEM_BOOT_MODE == 2) {
@@ -285,6 +302,7 @@ static inline void prepare_for_resume(void)
memcpy(dest, src, HIGH_MEMORY_SAVE);
}
#endif
+#endif
}
void romstage_after_car(void)
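The arithmetic in choose_top_of_stack() for the relocatable case boils down to the sketch below; the scratch size is the 0x5000 (20 KiB) HIGH_SCRATCH_MEMORY_SIZE default from the related Kconfig change (review 2792, below), the example address is made up, and error handling is omitted.

#include <cbmem.h>

static unsigned long relocatable_stack_top(void)
{
        /* e.g. cbmem_add() returns 0x7f9f0000, so the stack occupies
         * 0x7f9f0000..0x7f9f5000 and grows down from the top. */
        void *scratch = cbmem_add(CBMEM_ID_RESUME_SCRATCH, 0x5000);

        return (unsigned long)scratch + 0x5000;
}

The caller still masks the result with ~3 to keep the 4-byte alignment required for the MTRR and stack slots it pushes.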
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2792
-gerrit
commit c95991ad763ea16884eac6cd4e2819ef5f82e91b
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Feb 8 17:28:04 2013 -0600
coreboot: introduce CONFIG_RELOCATABLE_RAMSTAGE
This patch adds an option to build the ramstage as a relocatable
binary. It uses the rmodule library for the relocation. The main
changes consist of the following:
1. The ramstage is loaded just under the cbmem space.
2. Payloads cannot be loaded over the ramstage. If a payload attempts
to load where the relocatable ramstage resides, the load is aborted.
3. The memory occupied by the ramstage is reserved from the OS's usage
via the romstage_handoff structure stored in cbmem. This region is
communicated to ramstage by a CBMEM_ID_ROMSTAGE_INFO entry in cbmem.
4. There is no need to reserve cbmem space for the OS-controlled memory
on the resume path because the ramstage region has already been
reserved in #3.
5. Since no memory needs to be preserved on the wake path, loading and
starting execution of an ELF payload is straightforward.
Change-Id: Ia66cf1be65c29fa25ca7bd9ea6c8f11d7eee05f5
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/Kconfig | 18 +++++++++
src/arch/x86/Makefile.inc | 8 ++++
src/arch/x86/boot/Makefile.inc | 1 +
src/arch/x86/boot/acpi.c | 4 ++
src/arch/x86/boot/boot.c | 29 ++++++++++++++
src/arch/x86/boot/ramstage_module_header.c | 24 ++++++++++++
src/arch/x86/boot/tables.c | 3 ++
src/include/cbmem.h | 5 +++
src/lib/Makefile.inc | 1 +
src/lib/cbfs.c | 61 ++++++++++++++++++++++++++++++
src/lib/selfboot.c | 16 ++++++++
11 files changed, 170 insertions(+)
diff --git a/src/Kconfig b/src/Kconfig
index f6a83e8..7a8985c 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -307,7 +307,14 @@ config HAVE_INIT_TIMER
config HIGH_SCRATCH_MEMORY_SIZE
hex
+ default 0x5000 if RELOCATABLE_RAMSTAGE
default 0x0
+ help
+ The amount of extra memory to reserve from the OS. If
+ RELOCATABLE_RAMSTAGE is enabled a size of 20KiB is reserved. This is
+ for the use of a stack in romstage after memory has been initialized.
+ The stack size required in romstage can be large when needing to
+ decompress the ramstage.
config USE_OPTION_TABLE
bool
@@ -374,6 +381,17 @@ config RELOCATABLE_MODULES
building relocatable modules in the ram stage. Those modules can be
loaded anywhere and all the relocations are handled automatically.
+config RELOCATABLE_RAMSTAGE
+ depends on RELOCATABLE_MODULES
+ bool "Build the ramstage to be relocatable in 32-bit address space."
+ default n
+ help
+ The relocatable ramstage support allows for the ramstage to be built
+ as a relocatable module. The stage loader can identify a place
+ out of the OS way so that copying memory is unnecessary during an S3
+ wake. When selecting this option the romstage is responsible for
+ determining a stack location to use for loading the ramstage.
+
config HAVE_ACPI_TABLES
bool
help
diff --git a/src/arch/x86/Makefile.inc b/src/arch/x86/Makefile.inc
index cc7bfc2..ee956a4 100644
--- a/src/arch/x86/Makefile.inc
+++ b/src/arch/x86/Makefile.inc
@@ -157,6 +157,12 @@ $(objcbfs)/%.elf: $(objcbfs)/%.debug
################################################################################
# Build the coreboot_ram (stage 2)
+ifeq ($(CONFIG_RELOCATABLE_RAMSTAGE),y)
+
+$(eval $(call rmodule_link,$(objcbfs)/coreboot_ram.debug, $(obj)/arch/x86/boot/ramstage_module_header.ramstage.o $(objgenerated)/coreboot_ram.o, $(CONFIG_HEAP_SIZE)))
+
+else
+
$(objcbfs)/coreboot_ram.debug: $(objgenerated)/coreboot_ram.o $(src)/arch/x86/coreboot_ram.ld
@printf " CC $(subst $(obj)/,,$(@))\n"
ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y)
@@ -165,6 +171,8 @@ else
$(CC) -nostdlib -nostartfiles -static -o $@ -L$(obj) -T $(src)/arch/x86/coreboot_ram.ld $<
endif
+endif
+
$(objgenerated)/coreboot_ram.o: $$(ramstage-objs) $(LIBGCC_FILE_NAME)
@printf " CC $(subst $(obj)/,,$(@))\n"
ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y)
diff --git a/src/arch/x86/boot/Makefile.inc b/src/arch/x86/boot/Makefile.inc
index 3f11c01..9c18043 100644
--- a/src/arch/x86/boot/Makefile.inc
+++ b/src/arch/x86/boot/Makefile.inc
@@ -9,6 +9,7 @@ ramstage-$(CONFIG_GENERATE_ACPI_TABLES) += acpi.c
ramstage-$(CONFIG_GENERATE_SMBIOS_TABLES) += smbios.c
ramstage-$(CONFIG_GENERATE_ACPI_TABLES) += acpigen.c
ramstage-$(CONFIG_HAVE_ACPI_RESUME) += wakeup.S
+ramstage-$(CONFIG_RELOCATABLE_RAMSTAGE) += ramstage_module_header.c
$(obj)/arch/x86/boot/coreboot_table.ramstage.o : $(OPTION_TABLE_H)
$(obj)/arch/x86/boot/smbios.ramstage.o: $(obj)/build.h
diff --git a/src/arch/x86/boot/acpi.c b/src/arch/x86/boot/acpi.c
index 327d175..b04cbe5 100644
--- a/src/arch/x86/boot/acpi.c
+++ b/src/arch/x86/boot/acpi.c
@@ -759,6 +759,9 @@ extern unsigned int __wakeup_size;
void acpi_jump_to_wakeup(void *vector)
{
+#if CONFIG_RELOCATABLE_RAMSTAGE
+ u32 acpi_backup_memory = 0;
+#else
u32 acpi_backup_memory = (u32)cbmem_find(CBMEM_ID_RESUME);
if (!acpi_backup_memory) {
@@ -766,6 +769,7 @@ void acpi_jump_to_wakeup(void *vector)
"No S3 resume.\n");
return;
}
+#endif
#if CONFIG_SMP
// FIXME: This should go into the ACPI backup memory, too. No pork saussages.
diff --git a/src/arch/x86/boot/boot.c b/src/arch/x86/boot/boot.c
index d9cb02e..4892c5e 100644
--- a/src/arch/x86/boot/boot.c
+++ b/src/arch/x86/boot/boot.c
@@ -68,6 +68,34 @@ int elf_check_arch(Elf_ehdr *ehdr)
}
+#if CONFIG_RELOCATABLE_RAMSTAGE
+/* When the ramstage is relocatable the elf loading ensures an elf image cannot
+ * be loaded over the ramstage code. */
+void jmp_to_elf_entry(void *entry, unsigned long unused1, unsigned long unused2)
+{
+ elf_boot_notes.hdr.b_checksum =
+ compute_ip_checksum(&elf_boot_notes, sizeof(elf_boot_notes));
+
+ /* Jump to kernel */
+ __asm__ __volatile__(
+ " cld \n\t"
+ /* Now jump to the loaded image */
+ " call *%0\n\t"
+
+ /* The loaded image returned? */
+ " cli \n\t"
+ " cld \n\t"
+
+ ::
+ "r" (entry),
+#if CONFIG_MULTIBOOT
+ "b"(mbi), "a" (MB_MAGIC2)
+#else
+ "b"(&elf_boot_notes), "a" (0x0E1FB007)
+#endif
+ );
+}
+#else
void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size)
{
extern unsigned char _ram_seg, _eram_seg;
@@ -182,5 +210,6 @@ void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size)
#endif
);
}
+#endif /* CONFIG_RELOCATABLE_RAMSTAGE */
diff --git a/src/arch/x86/boot/ramstage_module_header.c b/src/arch/x86/boot/ramstage_module_header.c
new file mode 100644
index 0000000..b958c16
--- /dev/null
+++ b/src/arch/x86/boot/ramstage_module_header.c
@@ -0,0 +1,24 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <rmodule.h>
+
+extern char _start[];
+
+DEFINE_RMODULE_HEADER(ramstage_module, _start, RMODULE_TYPE_STAGE);
diff --git a/src/arch/x86/boot/tables.c b/src/arch/x86/boot/tables.c
index 7f28861..294a10c 100644
--- a/src/arch/x86/boot/tables.c
+++ b/src/arch/x86/boot/tables.c
@@ -232,11 +232,14 @@ struct lb_memory *write_tables(void)
post_code(0x9e);
#if CONFIG_HAVE_ACPI_RESUME
+/* Only add CBMEM_ID_RESUME when the ramstage isn't relocatable. */
+#if !CONFIG_RELOCATABLE_RAMSTAGE
/* Let's prepare the ACPI S3 Resume area now already, so we can rely on
* it begin there during reboot time. We don't need the pointer, nor
* the result right now. If it fails, ACPI resume will be disabled.
*/
cbmem_add(CBMEM_ID_RESUME, HIGH_MEMORY_SAVE);
+#endif
#if CONFIG_NORTHBRIDGE_AMD_AGESA_FAMILY14 || CONFIG_NORTHBRIDGE_AMD_AGESA_FAMILY15_TN
cbmem_add(CBMEM_ID_RESUME_SCRATCH, CONFIG_HIGH_SCRATCH_MEMORY_SIZE);
#endif
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 0aa9b30..d0f0c9a 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -28,7 +28,12 @@
#endif
#if CONFIG_HAVE_ACPI_RESUME
+#if CONFIG_RELOCATABLE_RAMSTAGE
+#define HIGH_MEMORY_SAVE 0
+#else
#define HIGH_MEMORY_SAVE (CONFIG_RAMTOP - CONFIG_RAMBASE)
+#endif
+
#define HIGH_MEMORY_SIZE (HIGH_MEMORY_SAVE + CONFIG_HIGH_SCRATCH_MEMORY_SIZE + HIGH_MEMORY_DEF_SIZE)
/* Delegation of resume backup memory so we don't have to
diff --git a/src/lib/Makefile.inc b/src/lib/Makefile.inc
index 160fce6..fe04e43 100644
--- a/src/lib/Makefile.inc
+++ b/src/lib/Makefile.inc
@@ -109,6 +109,7 @@ $(obj)/lib/uart8250.smm.o : $(OPTION_TABLE_H)
ifeq ($(CONFIG_RELOCATABLE_MODULES),y)
ramstage-y += rmodule.c
+romstage-$(CONFIG_RELOCATABLE_RAMSTAGE) += rmodule.c
RMODULE_LDSCRIPT := $(src)/lib/rmodule.ld
RMODULE_LDFLAGS := -nostartfiles -shared -z defs -nostdlib -Bsymbolic -T $(RMODULE_LDSCRIPT)
diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c
index abb95ab..7d81038 100644
--- a/src/lib/cbfs.c
+++ b/src/lib/cbfs.c
@@ -36,6 +36,7 @@
#include <cbfs.h>
#include <string.h>
+#include <cbmem.h>
#ifdef LIBPAYLOAD
# include <stdio.h>
@@ -114,6 +115,65 @@ void *cbfs_load_optionrom(struct cbfs_media *media, uint16_t vendor,
return dest;
}
+#if CONFIG_RELOCATABLE_RAMSTAGE && defined(__PRE_RAM__)
+
+#include <rmodule.h>
+#include <romstage_handoff.h>
+/* When CONFIG_RELOCATABLE_RAMSTAGE is enabled and this file is being compiled
+ * for the romstage the rmodule loader is used. The ramstage is placed just
+ * below the cbmem location. */
+
+void * cbfs_load_stage(struct cbfs_media *media, const char *name)
+{
+ struct cbfs_stage *stage;
+ struct rmodule ramstage;
+ void *cbmem_base;
+ void *ramstage_base;
+ void *decompression_loc;
+ void *ramstage_loc;
+ struct romstage_handoff *handoff;
+
+ stage = (struct cbfs_stage *)
+ cbfs_get_file_content(media, name, CBFS_TYPE_STAGE);
+
+ if (stage == NULL)
+ return (void *) -1;
+
+ cbmem_base = get_cbmem_toc();
+ if (cbmem_base == NULL)
+ return (void *) -1;
+
+ ramstage_base = rmodule_find_region_below(cbmem_base, stage->memlen,
+ &ramstage_loc,
+ &decompression_loc);
+
+ LOG("Decompressing stage %s @ 0x%p (%d bytes)\n",
+ name, decompression_loc, stage->memlen);
+
+ if (cbfs_decompress(stage->compression, &stage[1],
+ decompression_loc, stage->len))
+ return (void *) -1;
+
+ if (rmodule_parse(decompression_loc, &ramstage))
+ return (void *) -1;
+
+ /* The ramstage is responsible for clearing its own bss. */
+ if (rmodule_load_no_clear_bss(ramstage_loc, &ramstage))
+ return (void *) -1;
+
+ handoff = cbmem_add(CBMEM_ID_ROMSTAGE_INFO, sizeof(*handoff));
+ if (handoff) {
+ handoff->reserve_base = (uint32_t)ramstage_base;
+ handoff->reserve_size = (uint32_t)cbmem_base -
+ (uint32_t)ramstage_base;
+ } else
+ LOG("Couldn't allocate romstage handoff.\n");
+
+ return rmodule_entry(&ramstage);
+}
+
+#else
+
void * cbfs_load_stage(struct cbfs_media *media, const char *name)
{
struct cbfs_stage *stage = (struct cbfs_stage *)
@@ -145,6 +205,7 @@ void * cbfs_load_stage(struct cbfs_media *media, const char *name)
return (void *) entry;
}
+#endif /* CONFIG_RELOCATABLE_RAMSTAGE */
int cbfs_execute_stage(struct cbfs_media *media, const char *name)
{
diff --git a/src/lib/selfboot.c b/src/lib/selfboot.c
index f32bb81..f933142 100644
--- a/src/lib/selfboot.c
+++ b/src/lib/selfboot.c
@@ -78,6 +78,16 @@ struct segment {
static unsigned long bounce_size, bounce_buffer;
+#if CONFIG_RELOCATABLE_RAMSTAGE
+static void get_bounce_buffer(struct lb_memory *mem, unsigned long req_size)
+{
+ /* When the ramstage is relocatable there is no need for a bounce
+ * buffer. All payloads should not overlap the ramstage.
+ */
+ bounce_buffer = ~0UL;
+ bounce_size = 0;
+}
+#else
static void get_bounce_buffer(struct lb_memory *mem, unsigned long req_size)
{
unsigned long lb_size;
@@ -114,6 +124,7 @@ static void get_bounce_buffer(struct lb_memory *mem, unsigned long req_size)
bounce_buffer = buffer;
bounce_size = req_size;
}
+#endif /* CONFIG_RELOCATABLE_RAMSTAGE */
static int valid_area(struct lb_memory *mem, unsigned long buffer,
unsigned long start, unsigned long len)
@@ -394,8 +405,13 @@ static int load_self_segments(
for(ptr = head->next; ptr != head; ptr = ptr->next) {
if (!overlaps_coreboot(ptr))
continue;
+#if CONFIG_RELOCATABLE_RAMSTAGE
+ /* payloads are required to not overlap ramstage. */
+ return 0;
+#else
if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
bounce_high = ptr->s_dstaddr + ptr->s_memsz;
+#endif
}
get_bounce_buffer(mem, bounce_high - lb_start);
if (!bounce_buffer) {
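The reservation math in the new cbfs_load_stage() above amounts to the following worked sketch; the addresses are made up for illustration.

#include <stdint.h>

struct reserved_region { uint32_t base, size; };

/* Everything from the start of the relocated ramstage up to the cbmem
 * tables stays out of the OS memory map via romstage_handoff. */
static struct reserved_region reserve_for_ramstage(uint32_t ramstage_base,
                                                   uint32_t cbmem_base)
{
        struct reserved_region r = {
                .base = ramstage_base,
                .size = cbmem_base - ramstage_base,
        };
        return r;
}

/* Example: reserve_for_ramstage(0x7f8e0000, 0x7f9f0000) yields
 * base 0x7f8e0000, size 0x00110000, which ramstage later reports as an
 * LB_MEM_RESERVED range (see review 2791 below). */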
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2791
-gerrit
commit 099bf93a146866edb3953390a90d15b69b345400
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Feb 8 17:15:53 2013 -0600
coreboot: introduce romstage_handoff structure
The romstage_handoff structure is intended as a way for romstage and
ramstage to communicate with one another instead of using sideband
signals such as stuffing magic values into PCI config or memory scratch
space. Initially the structure contains just a single region that tells
ramstage to reserve a memory region used by the romstage. Ramstage
looks for a romstage_handoff structure in cbmem with an id of
CBMEM_ID_ROMSTAGE_INFO; if found, it honors the reservation of the
region defined there.
Change-Id: I9274ea5124e9bd6584f6977d8280b7e9292251f0
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/arch/x86/boot/coreboot_table.c | 20 ++++++++++++++++++++
src/include/romstage_handoff.h | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 55 insertions(+)
diff --git a/src/arch/x86/boot/coreboot_table.c b/src/arch/x86/boot/coreboot_table.c
index ab0f7ef..463f723 100644
--- a/src/arch/x86/boot/coreboot_table.c
+++ b/src/arch/x86/boot/coreboot_table.c
@@ -31,6 +31,7 @@
#include <stdlib.h>
#include <cbfs.h>
#include <cbmem.h>
+#include <romstage_handoff.h>
#if CONFIG_USE_OPTION_TABLE
#include <option_table.h>
#endif
@@ -591,6 +592,23 @@ static void add_lb_reserved(struct lb_memory *mem)
lb_add_rsvd_range, mem);
}
+static void add_romstage_resources(struct lb_memory *mem)
+{
+ struct romstage_handoff *handoff;
+
+ /* Reserve memory requested to be reserved from romstage. */
+ handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);
+
+ if (handoff == NULL)
+ return;
+
+ if (handoff->reserve_size == 0)
+ return;
+
+ lb_add_memory_range(mem, LB_MEM_RESERVED, handoff->reserve_base,
+ handoff->reserve_size);
+}
+
unsigned long write_coreboot_table(
unsigned long low_table_start, unsigned long low_table_end,
unsigned long rom_table_start, unsigned long rom_table_end)
@@ -658,6 +676,8 @@ unsigned long write_coreboot_table(
/* Add reserved regions */
add_lb_reserved(mem);
+ add_romstage_resources(mem);
+
lb_dump_memory_ranges(mem);
/* Note:
diff --git a/src/include/romstage_handoff.h b/src/include/romstage_handoff.h
new file mode 100644
index 0000000..0cadfb5
--- /dev/null
+++ b/src/include/romstage_handoff.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef ROMSTAGE_HANDOFF_H
+#define ROMSTAGE_HANDOFF_H
+
+#include <stdint.h>
+
+/* It is the chipset's responsibility for maintaining the integrity of this
+ * structure in CBMEM. For instance, if chipset code adds this structure
+ * using the CBMEM_ID_ROMSTAGE_INFO id it needs to ensure it doesn't clobber
+ * fields it doesn't own. */
+struct romstage_handoff {
+ /* This indicates to the ramstage to reserve a chunk of memory. */
+ uint32_t reserve_base;
+ uint32_t reserve_size;
+};
+
+#endif /* ROMSTAGE_HANDOFF_H */
+
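For completeness, a sketch of the romstage-side producer this consumer expects; the wrapper name is invented, and only the cbmem id and the structure fields come from the patch.

#include <string.h>
#include <cbmem.h>
#include <romstage_handoff.h>

static void publish_reserved_region(uint32_t base, uint32_t size)
{
        struct romstage_handoff *handoff;

        handoff = cbmem_add(CBMEM_ID_ROMSTAGE_INFO, sizeof(*handoff));
        if (handoff == NULL)
                return; /* ramstage then simply reserves nothing */

        memset(handoff, 0, sizeof(*handoff));
        handoff->reserve_base = base;
        handoff->reserve_size = size;
}

Review 2795 above later wraps this allocate-and-zero step in romstage_handoff_find_or_add() so that several romstage components can share the structure safely.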