Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2856
-gerrit
commit fa67c1eff783c71023ee1e793938feb01ec7f714
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Mar 1 17:40:49 2013 -0600
haswell: vboot path support in romstage
Take the vboot path in romstage. This will complete the haswell
support for vboot firmware selection.
Built and booted. Noted firmware select worked on an image with
RW firmware support. Also checked that recovery mode worked as
well by choosing the RO path.
Change-Id: Ie2b0a34e6c5c45e6f0d25f77a5fdbaef0324cb09
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/cpu/intel/haswell/romstage.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/src/cpu/intel/haswell/romstage.c b/src/cpu/intel/haswell/romstage.c
index 1d58191..54f4a97 100644
--- a/src/cpu/intel/haswell/romstage.c
+++ b/src/cpu/intel/haswell/romstage.c
@@ -298,13 +298,11 @@ void romstage_common(const struct romstage_params *params)
#endif
}
-static inline void prepare_for_resume(void)
+static inline void prepare_for_resume(struct romstage_handoff *handoff)
{
/* Only need to save memory when ramstage isn't relocatable. */
#if !CONFIG_RELOCATABLE_RAMSTAGE
#if CONFIG_HAVE_ACPI_RESUME
- struct romstage_handoff *handoff = romstage_handoff_find_or_add();
-
/* Back up the OS-controlled memory where ramstage will be loaded. */
if (handoff != NULL && handoff->s3_resume) {
void *src = (void *)CONFIG_RAMBASE;
@@ -318,7 +316,16 @@ static inline void prepare_for_resume(void)
void romstage_after_car(void)
{
- prepare_for_resume();
+ struct romstage_handoff *handoff;
+
+ handoff = romstage_handoff_find_or_add();
+
+ prepare_for_resume(handoff);
+
+#if CONFIG_VBOOT_VERIFY_FIRMWARE
+ vboot_verify_firmware(handoff);
+#endif
+
/* Load the ramstage. */
copy_and_run(0);
}
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2855
-gerrit
commit 75b919cd8ad04920e753910544cdde08fa5c9c86
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Mar 1 17:38:59 2013 -0600
haswell boards: support added chromeos function
The get_write_protect_state() function was added to the
chromeos API and needs to be supported by the boards.
Implement this support.
Built and booted. Noted firmware select worked on an image with
RW firmware support. Also checked that recovery mode worked as
well by choosing the RO path.
Change-Id: Ifd213be25304163fc61d153feac4f5a875a40902
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/mainboard/intel/baskingridge/chromeos.c | 5 +++++
src/mainboard/intel/wtm1/chromeos.c | 6 ++++++
src/mainboard/intel/wtm2/chromeos.c | 6 ++++++
3 files changed, 17 insertions(+)
diff --git a/src/mainboard/intel/baskingridge/chromeos.c b/src/mainboard/intel/baskingridge/chromeos.c
index 6a1bc26..677f177 100644
--- a/src/mainboard/intel/baskingridge/chromeos.c
+++ b/src/mainboard/intel/baskingridge/chromeos.c
@@ -127,3 +127,8 @@ int get_recovery_mode_switch(void)
return (gp_lvl3 >> (69-64)) & 1;
}
+int get_write_protect_state(void)
+{
+ return 0;
+}
+
diff --git a/src/mainboard/intel/wtm1/chromeos.c b/src/mainboard/intel/wtm1/chromeos.c
index 1864754..c2386a8 100644
--- a/src/mainboard/intel/wtm1/chromeos.c
+++ b/src/mainboard/intel/wtm1/chromeos.c
@@ -75,3 +75,9 @@ int get_recovery_mode_switch(void)
{
return 0; // force off
}
+
+int get_write_protect_state(void)
+{
+ return 0;
+}
+
diff --git a/src/mainboard/intel/wtm2/chromeos.c b/src/mainboard/intel/wtm2/chromeos.c
index 1864754..c2386a8 100644
--- a/src/mainboard/intel/wtm2/chromeos.c
+++ b/src/mainboard/intel/wtm2/chromeos.c
@@ -75,3 +75,9 @@ int get_recovery_mode_switch(void)
{
return 0; // force off
}
+
+int get_write_protect_state(void)
+{
+ return 0;
+}
+
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2853
-gerrit
commit 89c477780a65f41b3186487265680863fb21ac47
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Fri Mar 1 17:10:28 2013 -0600
rmodule: add vboot rmodule type
For completeness add a vboot rmodule type since vboot will be
built as an rmodule.
Change-Id: I4b9b1e6f6077f811cafbb81effd4d082c91d4300
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/include/rmodule.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/include/rmodule.h b/src/include/rmodule.h
index f46a59d..62c0bf3 100644
--- a/src/include/rmodule.h
+++ b/src/include/rmodule.h
@@ -29,6 +29,7 @@ enum {
RMODULE_TYPE_SMM,
RMODULE_TYPE_SIPI_VECTOR,
RMODULE_TYPE_STAGE,
+ RMODULE_TYPE_VBOOT,
};
struct rmodule;
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2849
-gerrit
commit 303cd1af18c44132473a5bfd6fcae353338b0016
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Wed Feb 27 22:50:12 2013 -0600
coreboot: dynamic cbmem requirement
Dynamic cbmem is now a requirement for relocatable ramstage.
This patch replaces the reserve_* fields in the romstage_handoff
structure by using the dynamic cbmem library.
The haswell code is not moved over in this commit, but it should be
safe because there is a hard requirement for DYNAMIC_CBMEM when using
a relocatable ramstage.
Change-Id: I59ab4552c3ae8c2c3982df458cd81a4a9b712cc2
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/Kconfig | 9 +--
src/arch/x86/boot/coreboot_table.c | 20 -------
src/include/cbfs.h | 21 ++++---
src/include/cbmem.h | 5 --
src/include/rmodule.h | 16 ++---
src/include/romstage_handoff.h | 3 -
src/lib/cbfs.c | 91 ++++++++++++++++-------------
src/lib/hardwaremain.c | 9 ---
src/lib/rmodule.c | 38 ++++++------
src/northbridge/intel/haswell/northbridge.c | 15 -----
10 files changed, 94 insertions(+), 133 deletions(-)
diff --git a/src/Kconfig b/src/Kconfig
index 0297970..18b5bad 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -315,14 +315,7 @@ config HAVE_INIT_TIMER
config HIGH_SCRATCH_MEMORY_SIZE
hex
- default 0x5000 if RELOCATABLE_RAMSTAGE
default 0x0
- help
- The amount of extra memory to reserve from the OS. If
- RELOCATABLE_RAMSTAGE is enabled a size of 20KiB is reserved. This is
- for the use of a stack in romstage after memory has been initialized.
- The stack size required in romstage can be large when needing to
- decompress the ramstage.
config USE_OPTION_TABLE
bool
@@ -390,7 +383,7 @@ config RELOCATABLE_MODULES
loaded anywhere and all the relocations are handled automatically.
config RELOCATABLE_RAMSTAGE
- depends on RELOCATABLE_MODULES
+ depends on (RELOCATABLE_MODULES && DYNAMIC_CBMEM)
bool "Build the ramstage to be relocatable in 32-bit address space."
default n
help
diff --git a/src/arch/x86/boot/coreboot_table.c b/src/arch/x86/boot/coreboot_table.c
index 06d3888..3b22569 100644
--- a/src/arch/x86/boot/coreboot_table.c
+++ b/src/arch/x86/boot/coreboot_table.c
@@ -31,7 +31,6 @@
#include <stdlib.h>
#include <cbfs.h>
#include <cbmem.h>
-#include <romstage_handoff.h>
#if CONFIG_USE_OPTION_TABLE
#include <option_table.h>
#endif
@@ -596,23 +595,6 @@ static void add_lb_reserved(struct lb_memory *mem)
lb_add_rsvd_range, mem);
}
-static void add_romstage_resources(struct lb_memory *mem)
-{
- struct romstage_handoff *handoff;
-
- /* Reserve memory requested to be reserved from romstage. */
- handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);
-
- if (handoff == NULL)
- return;
-
- if (handoff->reserve_size == 0)
- return;
-
- lb_add_memory_range(mem, LB_MEM_RESERVED, handoff->reserve_base,
- handoff->reserve_size);
-}
-
unsigned long write_coreboot_table(
unsigned long low_table_start, unsigned long low_table_end,
unsigned long rom_table_start, unsigned long rom_table_end)
@@ -686,8 +668,6 @@ unsigned long write_coreboot_table(
/* Add reserved regions */
add_lb_reserved(mem);
- add_romstage_resources(mem);
-
lb_dump_memory_ranges(mem);
/* Note:
diff --git a/src/include/cbfs.h b/src/include/cbfs.h
index 811df88..ac249aa 100644
--- a/src/include/cbfs.h
+++ b/src/include/cbfs.h
@@ -87,21 +87,24 @@ int init_default_cbfs_media(struct cbfs_media *media);
/* The cache_loaded_ramstage() and load_cached_ramstage() functions are defined
* to be weak so that board and chipset code may override them. Their job is to
* cache and load the ramstage for quick S3 resume. By default a copy of the
- * relocated ramstage is saved just below the running ramstage region. These
+ * relocated ramstage is saved using the cbmem infrastructure. These
* functions are only valid during romstage. */
struct romstage_handoff;
+struct cbmem_entry;
-/* The implementer of cache_loaded_ramstage() needs to ensure that the
- * reserve_* fields in in romstage_handoff reflect the memory footprint of the
- * ramstage (including cached region). Note that the handoff variable can be
- * NULL. */
+/* The implementer of cache_loaded_ramstage() may use the romstage_handoff
+ * structure to store information, but note that the handoff variable can be
+ * NULL. The ramstage cbmem_entry represents the region occupied by the loaded
+ * ramstage. */
void __attribute__((weak))
-cache_loaded_ramstage(struct romstage_handoff *handoff, void *ramstage_base,
- uint32_t ramstage_size, void *entry_point);
-/* Return NULL on error or entry point on success. */
+cache_loaded_ramstage(struct romstage_handoff *handoff,
+ const struct cbmem_entry *ramstage, void *entry_point);
+/* Return NULL on error or entry point on success. The ramstage cbmem_entry is
+ * the region where to load the cached contents to. */
void * __attribute__((weak))
-load_cached_ramstage(struct romstage_handoff *handoff);
+load_cached_ramstage(struct romstage_handoff *handoff,
+ const struct cbmem_entry *ramstage);
#endif /* CONFIG_RELOCATABLE_RAMSTAGE */
#endif
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 41f5971..b3d9f86 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -131,11 +131,6 @@ void cbmem_add_lb_mem(struct lb_memory *mem);
#ifndef __PRE_RAM__
extern uint64_t high_tables_base, high_tables_size;
-#if CONFIG_EARLY_CBMEM_INIT
-/* Return 0 on success, < 0 on error. */
-int __attribute__((weak)) cbmem_get_table_location(uint64_t *tables_base,
- uint64_t *tables_size);
-#endif
void set_cbmem_toc(struct cbmem_entry *);
#endif
diff --git a/src/include/rmodule.h b/src/include/rmodule.h
index e8e7636..f46a59d 100644
--- a/src/include/rmodule.h
+++ b/src/include/rmodule.h
@@ -42,13 +42,15 @@ int rmodule_memory_size(const struct rmodule *m);
int rmodule_load(void *loc, struct rmodule *m);
int rmodule_load_no_clear_bss(void *base, struct rmodule *m);
int rmodule_load_alignment(const struct rmodule *m);
-/* Returns the an aligned pointer that reflects a region used below addr
- * based on the rmodule_size. i.e. the returned pointer up to addr is memory
- * that may be utilized by the rmodule. program_start and rmodule_start
- * are pointers updated to reflect where the rmodule program starts and where
- * the rmodule (including header) should be placed respectively. */
-void *rmodule_find_region_below(void *addr, size_t rmodule_size,
- void **program_start, void **rmodule_start);
+/* rmodule_calc_region() calculates the region size, offset to place an
+ * rmodule in memory, and load address offset based off of a region allocator
+ * with an alignment of region_alignment. This function helps place an rmodule
+ * in the same location in ram it will run from. The offset to place the
+ * rmodule into the region allocated of size region_size is returned. The
+ * load_offset is the address to load and relocate the rmodule.
+ * region_alignment must be a power of 2. */
+int rmodule_calc_region(unsigned int region_alignment, size_t rmodule_size,
+ size_t *region_size, int *load_offset);
#define FIELD_ENTRY(x_) ((u32)&x_)
#define RMODULE_HEADER(entry_, type_) \
diff --git a/src/include/romstage_handoff.h b/src/include/romstage_handoff.h
index 4150e8e..3152fb2 100644
--- a/src/include/romstage_handoff.h
+++ b/src/include/romstage_handoff.h
@@ -28,9 +28,6 @@
* using the CBMEM_ID_ROMSTAGE_INFO id it needs to ensure it doesn't clobber
* fields it doesn't own. */
struct romstage_handoff {
- /* This indicates to the ramstage to reserve a chunk of memory. */
- uint32_t reserve_base;
- uint32_t reserve_size;
/* Inidicate if the current boot is an S3 resume. If
* CONFIG_RELOCTABLE_RAMSTAGE is enabled the chipset code is
* responsible for initializing this variable. Otherwise, ramstage
diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c
index 8bcb000..48ee86a 100644
--- a/src/lib/cbfs.c
+++ b/src/lib/cbfs.c
@@ -120,41 +120,48 @@ void *cbfs_load_optionrom(struct cbfs_media *media, uint16_t vendor,
#include <rmodule.h>
#include <romstage_handoff.h>
/* When CONFIG_RELOCATABLE_RAMSTAGE is enabled and this file is being compiled
- * for the romstage, the rmodule loader is used. The ramstage is placed just
- * below the cbmem location. */
-
+ * for the romstage, the rmodule loader is used. */
void __attribute__((weak))
-cache_loaded_ramstage(struct romstage_handoff *handoff, void *ramstage_base,
- uint32_t ramstage_size, void *entry_point)
+cache_loaded_ramstage(struct romstage_handoff *handoff,
+ const struct cbmem_entry *ramstage, void *entry_point)
{
+ uint32_t ramstage_size;
+ const struct cbmem_entry *entry;
+
if (handoff == NULL)
return;
- /* Cache the loaded ramstage just below the to-be-run ramstage. Then
- * save the base, size, and entry point in the handoff area. */
- handoff->reserve_base = (uint32_t)ramstage_base - ramstage_size;
- handoff->reserve_size = ramstage_size;
- handoff->ramstage_entry_point = (uint32_t)entry_point;
+ ramstage_size = cbmem_entry_size(ramstage);
+ /* cbmem_entry_add() does a find() before add(). */
+ entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE_CACHE, ramstage_size);
- memcpy((void *)handoff->reserve_base, ramstage_base, ramstage_size);
+ if (entry == NULL)
+ return;
+
+ /* Keep track of the entry point in the handoff structure. */
+ handoff->ramstage_entry_point = (uint32_t)entry_point;
- /* Update the reserve region by 2x in order to store the cached copy. */
- handoff->reserve_size += handoff->reserve_size;
+ memcpy(cbmem_entry_start(entry), cbmem_entry_start(ramstage),
+ ramstage_size);
}
void * __attribute__((weak))
-load_cached_ramstage(struct romstage_handoff *handoff)
+load_cached_ramstage(struct romstage_handoff *handoff,
+ const struct cbmem_entry *ramstage)
{
- uint32_t ramstage_size;
+ const struct cbmem_entry *entry_cache;
if (handoff == NULL)
return NULL;
- /* Load the cached ramstage copy into the to-be-run region. It is just
- * above the cached copy. */
- ramstage_size = handoff->reserve_size / 2;
- memcpy((void *)(handoff->reserve_base + ramstage_size),
- (void *)handoff->reserve_base, ramstage_size);
+ entry_cache = cbmem_entry_find(CBMEM_ID_RAMSTAGE_CACHE);
+
+ if (entry_cache == NULL)
+ return NULL;
+
+ /* Load the cached ramstage copy into the to-be-run region. */
+ memcpy(cbmem_entry_start(ramstage), cbmem_entry_start(entry_cache),
+ cbmem_entry_size(ramstage));
return (void *)handoff->ramstage_entry_point;
}
@@ -164,12 +171,12 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
{
struct cbfs_stage *stage;
struct rmodule ramstage;
- char *cbmem_base;
- char *ramstage_base;
- void *decompression_loc;
- void *ramstage_loc;
void *entry_point;
- uint32_t ramstage_size;
+ size_t region_size;
+ char *ramstage_region;
+ int rmodule_offset;
+ int load_offset;
+ const struct cbmem_entry *ramstage_entry;
stage = (struct cbfs_stage *)
cbfs_get_file_content(media, name, CBFS_TYPE_STAGE);
@@ -177,34 +184,34 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
if (stage == NULL)
return (void *) -1;
- cbmem_base = (void *)get_cbmem_toc();
- if (cbmem_base == NULL)
+ rmodule_offset =
+ rmodule_calc_region(DYN_CBMEM_ALIGN_SIZE,
+ stage->memlen, ®ion_size, &load_offset);
+
+ ramstage_entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE, region_size);
+
+ if (ramstage_entry == NULL)
return (void *) -1;
- ramstage_base =
- rmodule_find_region_below(cbmem_base, stage->memlen,
- &ramstage_loc,
- &decompression_loc);
+ ramstage_region = cbmem_entry_start(ramstage_entry);
LOG("Decompressing stage %s @ 0x%p (%d bytes)\n",
- name, decompression_loc, stage->memlen);
+ name, &ramstage_region[rmodule_offset], stage->memlen);
if (cbfs_decompress(stage->compression, &stage[1],
- decompression_loc, stage->len))
+ &ramstage_region[rmodule_offset], stage->len))
return (void *) -1;
- if (rmodule_parse(decompression_loc, &ramstage))
+ if (rmodule_parse(&ramstage_region[rmodule_offset], &ramstage))
return (void *) -1;
/* The ramstage is responsible for clearing its own bss. */
- if (rmodule_load_no_clear_bss(ramstage_loc, &ramstage))
+ if (rmodule_load_no_clear_bss(&ramstage_region[load_offset], &ramstage))
return (void *) -1;
entry_point = rmodule_entry(&ramstage);
- ramstage_size = cbmem_base - ramstage_base;
- cache_loaded_ramstage(handoff, ramstage_base, ramstage_size,
- entry_point);
+ cache_loaded_ramstage(handoff, ramstage_entry, entry_point);
return entry_point;
}
@@ -212,6 +219,7 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
void * cbfs_load_stage(struct cbfs_media *media, const char *name)
{
struct romstage_handoff *handoff;
+ const struct cbmem_entry *ramstage;
void *entry;
handoff = romstage_handoff_find_or_add();
@@ -222,9 +230,14 @@ void * cbfs_load_stage(struct cbfs_media *media, const char *name)
} else if (!handoff->s3_resume)
return load_stage_from_cbfs(media, name, handoff);
+ ramstage = cbmem_entry_find(CBMEM_ID_RAMSTAGE);
+
+ if (ramstage == NULL)
+ return load_stage_from_cbfs(name, handoff);
+
/* S3 resume path. Load a cached copy of the loaded ramstage. If
* return value is NULL load from cbfs. */
- entry = load_cached_ramstage(handoff);
+ entry = load_cached_ramstage(handoff, ramstage);
if (entry == NULL)
return load_stage_from_cbfs(name, handoff);
diff --git a/src/lib/hardwaremain.c b/src/lib/hardwaremain.c
index bc18989..a3ee10b 100644
--- a/src/lib/hardwaremain.c
+++ b/src/lib/hardwaremain.c
@@ -85,15 +85,6 @@ void hardwaremain(int boot_complete)
/* FIXME: Is there a better way to handle this? */
init_timer();
- /* CONFIG_EARLY_CBMEM_INIT indicates that romstage initialized
- * the cbmem area. Therefore the table location can be initialized
- * early in ramstage if cbmem_get_table_location() is implemented.
- */
-#if CONFIG_EARLY_CBMEM_INIT
- if (cbmem_get_table_location != NULL &&
- !cbmem_get_table_location(&high_tables_base, &high_tables_size))
- cbmem_initialize();
-#endif
init_cbmem_pre_device();
timestamp_stash(TS_DEVICE_ENUMERATE);
diff --git a/src/lib/rmodule.c b/src/lib/rmodule.c
index d36f9f3..3dd90d8 100644
--- a/src/lib/rmodule.c
+++ b/src/lib/rmodule.c
@@ -16,6 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
@@ -265,16 +266,22 @@ int rmodule_load_no_clear_bss(void *base, struct rmodule *module)
return __rmodule_load(base, module, 0);
}
-void *rmodule_find_region_below(void *addr, size_t rmodule_size,
- void **program_start, void **rmodule_start)
+int rmodule_calc_region(unsigned int region_alignment, size_t rmodule_size,
+ size_t *region_size, int *load_offset)
{
- unsigned long ceiling;
- unsigned long program_base;
- unsigned long placement_loc;
- unsigned long program_begin;
+ /* region_alignment must be a power of 2. */
+ if (region_alignment & (region_alignment - 1))
+ BUG();
- ceiling = (unsigned long)addr;
- /* Place the rmodule just under the ceiling. The rmodule files
+ if (region_alignment < 4096)
+ region_alignment = 4096;
+
+ /* Sanity check rmodule_header size. The code below assumes it is less
+ * than the minimum alignment required. */
+ if (region_alignment < sizeof(struct rmodule_header))
+ BUG();
+
+ /* Place the rmodule according to alignment. The rmodule files
* themselves are packed as a header and a payload, however the rmodule
* itself is linked along with the header. The header starts at address
* 0. Immediately following the header in the file is the program,
@@ -284,13 +291,13 @@ void *rmodule_find_region_below(void *addr, size_t rmodule_size,
* to place the rmodule so that the prgoram falls on the aligned
* address with the header just before it. Therefore, we need at least
* a page to account for the size of the header. */
- program_base = ALIGN((ceiling - (rmodule_size + 4096)), 4096);
+ *region_size = ALIGN(rmodule_size + region_alignment, 4096);
/* The program starts immediately after the header. However,
* it needs to be aligned to a 4KiB boundary. Therefore, adjust the
* program location so that the program lands on a page boundary. The
* layout looks like the following:
*
- * +--------------------------------+ ceiling
+ * +--------------------------------+ region_alignment + region_size
* | >= 0 bytes from alignment |
* +--------------------------------+ program end (4KiB aligned)
* | program size |
@@ -298,14 +305,9 @@ void *rmodule_find_region_below(void *addr, size_t rmodule_size,
* | sizeof(struct rmodule_header) |
* +--------------------------------+ rmodule header start
* | >= 0 bytes from alignment |
- * +--------------------------------+ program_base (4KiB aligned)
+ * +--------------------------------+ region_alignment
*/
- placement_loc = ALIGN(program_base + sizeof(struct rmodule_header),
- 4096) - sizeof(struct rmodule_header);
- program_begin = placement_loc + sizeof(struct rmodule_header);
-
- *program_start = (void *)program_begin;
- *rmodule_start = (void *)placement_loc;
+ *load_offset = region_alignment;
- return (void *)program_base;
+ return region_alignment - sizeof(struct rmodule_header);
}
diff --git a/src/northbridge/intel/haswell/northbridge.c b/src/northbridge/intel/haswell/northbridge.c
index 8708138..53c2f36 100644
--- a/src/northbridge/intel/haswell/northbridge.c
+++ b/src/northbridge/intel/haswell/northbridge.c
@@ -543,21 +543,6 @@ static void northbridge_init(struct device *dev)
MCHBAR32(0x5500) = 0x00100001;
}
-#if CONFIG_EARLY_CBMEM_INIT
-int cbmem_get_table_location(uint64_t *tables_base, uint64_t *tables_size)
-{
- uint32_t tseg;
-
- /* Put the CBMEM location just below TSEG. */
- *tables_size = HIGH_MEMORY_SIZE;
- tseg = (pci_read_config32(dev_find_slot(0, PCI_DEVFN(0, 0)),
- TSEG) & ~((1 << 20) - 1)) - HIGH_MEMORY_SIZE;
- *tables_base = tseg;
-
- return 0;
-}
-#endif
-
static void northbridge_enable(device_t dev)
{
#if CONFIG_HAVE_ACPI_RESUME
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2848
-gerrit
commit 5e9b74eb4a2de4175aea8df89947ffaff8d52722
Author: Aaron Durbin <adurbin(a)chromium.org>
Date: Wed Mar 13 12:41:44 2013 -0500
cbmem: dynamic cbmem support
This patch adds a parallel implementation of cbmem that supports
dynamic sizing. The original implementation relied on reserving
a fixed-size block of memory for adding cbmem entries. In order to
allow for more flexibility for adding cbmem allocations the dynamic
cbmem infrastructure was developed as an alternative to the fixed block
approach. Also, the amount of memory to reserve for cbmem allocations
does not need to be known prior to the first allocation.
The dynamic cbmem code implements the same API as the existing cbmem
code except for cbmem_init() and cbmem_reinit(). The add and find
routines behave the same way. The dynamic cbmem infrastructure
uses a top down allocator that starts allocating from a board/chipset
defined function cbmem_top(). A root pointer lives just below
cbmem_top(). In turn that pointer points to the root block which
contains the entries for all the large allocations. The corresponding
block for each large allocation falls just below the previous entry.
It should be noted that this implementation rounds all allocations
up to a 4096 byte granularity. Though a packing allocator could
be written for small allocations it was deemed OK to just fragment
the memory as there shouldn't be that many small allocations. The
result is less code with a tradeoff of some wasted memory.
+----------------------+ <- cbmem_top()
| +----| root pointer |
| | +----------------------+
| | | |--------+
| +--->| root block |-----+ |
| +----------------------+ | |
| | | | |
| | | | |
| | alloc N |<----+ |
| +----------------------+ |
| | | |
| | | |
\|/ | alloc N + 1 |<-------+
v +----------------------+
In addition to preserving the previous cbmem API, the dynamic
cbmem API allows for removing blocks from cbmem. This allows for
the boot process to allocate memory that can be discarded after
it's been used for performing more complex boot tasks in romstage.
In order to plumb this support in there were some issues to work
around regarding writing of coreboot tables. There were a few
assumptions about how cbmem was laid out, which dictated some ifdef
guarding and other runtime checks so as not to incorrectly
tag the e820 and coreboot memory tables.
The example shown below is using dynamic cbmem infrastructure.
The reserved memory for cbmem is less than 512KiB.
coreboot memory table:
0. 0000000000000000-0000000000000fff: CONFIGURATION TABLES
1. 0000000000001000-000000000002ffff: RAM
2. 0000000000030000-000000000003ffff: RESERVED
3. 0000000000040000-000000000009ffff: RAM
4. 00000000000a0000-00000000000fffff: RESERVED
5. 0000000000100000-0000000000efffff: RAM
6. 0000000000f00000-0000000000ffffff: RESERVED
7. 0000000001000000-000000007bf80fff: RAM
8. 000000007bf81000-000000007bffffff: CONFIGURATION TABLES
9. 000000007c000000-000000007e9fffff: RESERVED
10. 00000000f0000000-00000000f3ffffff: RESERVED
11. 00000000fed10000-00000000fed19fff: RESERVED
12. 00000000fed84000-00000000fed84fff: RESERVED
13. 0000000100000000-00000001005fffff: RAM
Wrote coreboot table at: 7bf81000, 0x39c bytes, checksum f5bf
coreboot table: 948 bytes.
CBMEM ROOT 0. 7bfff000 00001000
MRC DATA 1. 7bffe000 00001000
ROMSTAGE 2. 7bffd000 00001000
TIME STAMP 3. 7bffc000 00001000
ROMSTG STCK 4. 7bff7000 00005000
CONSOLE 5. 7bfe7000 00010000
VBOOT 6. 7bfe6000 00001000
RAMSTAGE 7. 7bf98000 0004e000
GDT 8. 7bf97000 00001000
ACPI 9. 7bf8b000 0000c000
ACPI GNVS 10. 7bf8a000 00001000
SMBIOS 11. 7bf89000 00001000
COREBOOT 12. 7bf81000 00008000
And the corresponding e820 entries:
BIOS-e820: [mem 0x0000000000000000-0x0000000000000fff] type 16
BIOS-e820: [mem 0x0000000000001000-0x000000000002ffff] usable
BIOS-e820: [mem 0x0000000000030000-0x000000000003ffff] reserved
BIOS-e820: [mem 0x0000000000040000-0x000000000009ffff] usable
BIOS-e820: [mem 0x00000000000a0000-0x00000000000fffff] reserved
BIOS-e820: [mem 0x0000000000100000-0x0000000000efffff] usable
BIOS-e820: [mem 0x0000000000f00000-0x0000000000ffffff] reserved
BIOS-e820: [mem 0x0000000001000000-0x000000007bf80fff] usable
BIOS-e820: [mem 0x000000007bf81000-0x000000007bffffff] type 16
BIOS-e820: [mem 0x000000007c000000-0x000000007e9fffff] reserved
BIOS-e820: [mem 0x00000000f0000000-0x00000000f3ffffff] reserved
BIOS-e820: [mem 0x00000000fed10000-0x00000000fed19fff] reserved
BIOS-e820: [mem 0x00000000fed84000-0x00000000fed84fff] reserved
BIOS-e820: [mem 0x0000000100000000-0x00000001005fffff] usable
Change-Id: Ie3bca52211800a8652a77ca684140cfc9b3b9a6b
Signed-off-by: Aaron Durbin <adurbin(a)chromium.org>
---
src/Kconfig | 8 +
src/arch/x86/boot/coreboot_table.c | 22 +-
src/arch/x86/boot/tables.c | 2 +
src/include/cbmem.h | 104 ++++++++-
src/lib/Makefile.inc | 12 +-
src/lib/cbmem.c | 38 ++--
src/lib/cbmem_info.c | 69 ++++++
src/lib/dynamic_cbmem.c | 452 +++++++++++++++++++++++++++++++++++++
src/lib/hardwaremain.c | 7 +-
9 files changed, 671 insertions(+), 43 deletions(-)
diff --git a/src/Kconfig b/src/Kconfig
index 7a8985c..0297970 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -179,6 +179,14 @@ config EARLY_CBMEM_INIT
some, for instance, execution timestamps. It needs support in
romstage.c and should be enabled by the board's Kconfig.
+config DYNAMIC_CBMEM
+ bool "The CBMEM space is dynamically grown."
+ default n
+ help
+ Instead of reserving a static amount of CBMEM space the CBMEM
+ area grows dynamically. CBMEM can be used both in romstage (after
+ memory initialization) and ramstage.
+
config COLLECT_TIMESTAMPS
bool "Create a table of timestamps collected during boot"
depends on EARLY_CBMEM_INIT
diff --git a/src/arch/x86/boot/coreboot_table.c b/src/arch/x86/boot/coreboot_table.c
index 463f723..06d3888 100644
--- a/src/arch/x86/boot/coreboot_table.c
+++ b/src/arch/x86/boot/coreboot_table.c
@@ -355,6 +355,9 @@ static void lb_memory_range(struct lb_memory *mem,
static void lb_reserve_table_memory(struct lb_header *head)
{
+/* Dynamic cbmem has already reserved the memory where the coreboot tables
+ * reside. Therefore, there is nothing to fix up. */
+#if !CONFIG_DYNAMIC_CBMEM
struct lb_record *last_rec;
struct lb_memory *mem;
uint64_t start;
@@ -383,6 +386,7 @@ static void lb_reserve_table_memory(struct lb_header *head)
mem->map[i].size = pack_lb64(map_end - end);
}
}
+#endif
}
static unsigned long lb_table_fini(struct lb_header *head, int fixup)
@@ -664,14 +668,20 @@ unsigned long write_coreboot_table(
lb_add_memory_range(mem, LB_MEM_TABLE,
low_table_start, low_table_end - low_table_start);
- /* Record the pirq table, acpi tables, and maybe the mptable */
- lb_add_memory_range(mem, LB_MEM_TABLE,
- rom_table_start, rom_table_end-rom_table_start);
-
- printk(BIOS_DEBUG, "Adding high table area\n");
- // should this be LB_MEM_ACPI?
+ /* Record the pirq table, acpi tables, and maybe the mptable. However,
+ * these only need to be added when the rom_table is sitting below
+ * 1MiB. If it isn't that means high tables are being written.
+ * The code below handles high tables correctly. */
+ if (rom_table_end <= (1 << 20))
+ lb_add_memory_range(mem, LB_MEM_TABLE,
+ rom_table_start, rom_table_end-rom_table_start);
+
+#if CONFIG_DYNAMIC_CBMEM
+ cbmem_add_lb_mem(mem);
+#else /* CONFIG_DYNAMIC_CBMEM */
lb_add_memory_range(mem, LB_MEM_TABLE,
high_tables_base, high_tables_size);
+#endif /* CONFIG_DYNAMIC_CBMEM */
/* Add reserved regions */
add_lb_reserved(mem);
diff --git a/src/arch/x86/boot/tables.c b/src/arch/x86/boot/tables.c
index 294a10c..d842e73 100644
--- a/src/arch/x86/boot/tables.c
+++ b/src/arch/x86/boot/tables.c
@@ -53,6 +53,7 @@ struct lb_memory *write_tables(void)
*/
unsigned long high_table_pointer;
+#if !CONFIG_DYNAMIC_CBMEM
if (!high_tables_base) {
printk(BIOS_ERR, "ERROR: High Tables Base is not set.\n");
// Are there any boards without?
@@ -60,6 +61,7 @@ struct lb_memory *write_tables(void)
}
printk(BIOS_DEBUG, "High Tables Base is %llx.\n", high_tables_base);
+#endif
rom_table_start = 0xf0000;
rom_table_end = 0xf0000;
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 1212cb2..41f5971 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2009 coresystems GmbH
+ * Copyright (C) 2013 Google, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,9 +63,72 @@
#define CBMEM_ID_ELOG 0x454c4f47
#define CBMEM_ID_COVERAGE 0x47434f56
#define CBMEM_ID_ROMSTAGE_INFO 0x47545352
+#define CBMEM_ID_ROMSTAGE_RAM_STACK 0x90357ac4
+#define CBMEM_ID_RAMSTAGE 0x9a357a9e
+#define CBMEM_ID_RAMSTAGE_CACHE 0x9a3ca54e
+#define CBMEM_ID_ROOT 0xff4007ff
#define CBMEM_ID_NONE 0x00000000
#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+struct cbmem_entry;
+
+#if CONFIG_DYNAMIC_CBMEM
+
+/*
+ * The dynamic cbmem infrastructure allows for growing cbmem dynamically as
+ * things are added. It requires an external function, cbmem_top(), to be
+ * implemented by the board or chipset to define the upper address where
+ * cbmem lives. This address is required to be a 32-bit address. Additionally,
+ * the address needs to be consistent in both romstage and ramstage. The
+ * dynamic cbmem infrastructure allocates new regions below the last allocated
+ * region. Regions are defined by a cbmem_entry struct that is opaque. Regions
+ * may be removed, but the last one added is the only one that can be removed.
+ *
+ * Dynamic cbmem has two allocators within it. All allocators use a top down
+ * allocation scheme. However, there are 2 modes for each allocation depending
+ * on the requested size. There are large allocations and small allocations.
+ * An allocation is considered to be small when it is less than or equal to
+ * DYN_CBMEM_ALIGN_SIZE / 2. The smaller allocations are fit into a larger
+ * allocation region.
+ */
+
+#define DYN_CBMEM_ALIGN_SIZE (4096)
+
+/* Initialize cbmem to be empty. */
+void cbmem_initialize_empty(void);
+
+/* Return the top address for dynamic cbmem. The address returned needs to
+ * be consistent across romstage and ramstage, and it is required to be
+ * below 4GiB. */
+void *cbmem_top(void);
+
+/* Add a cbmem entry of a given size and id. These return NULL on failure. The
+ * add function performs a find first and does not check against the original
+ * size. */
+const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size);
+
+/* Find a cbmem entry of a given id. These return NULL on failure. */
+const struct cbmem_entry *cbmem_entry_find(u32 id);
+
+/* Remove a region defined by a cbmem_entry. Returns 0 on success, < 0 on
+ * error. Note: A cbmem_entry cannot be removed unless it was the last one
+ * added. */
+int cbmem_entry_remove(const struct cbmem_entry *entry);
+
+/* cbmem_entry accessors to get pointer and size of a cbmem_entry. */
+void *cbmem_entry_start(const struct cbmem_entry *entry);
+u64 cbmem_entry_size(const struct cbmem_entry *entry);
+
+#ifndef __PRE_RAM__
+/* Add the cbmem memory used to the memory tables. */
+struct lb_memory;
+void cbmem_add_lb_mem(struct lb_memory *mem);
+#endif /* __PRE_RAM__ */
+
+#else /* !CONFIG_DYNAMIC_CBMEM */
+
#ifndef __PRE_RAM__
extern uint64_t high_tables_base, high_tables_size;
#if CONFIG_EARLY_CBMEM_INIT
@@ -72,22 +136,44 @@ extern uint64_t high_tables_base, high_tables_size;
int __attribute__((weak)) cbmem_get_table_location(uint64_t *tables_base,
uint64_t *tables_size);
#endif
+void set_cbmem_toc(struct cbmem_entry *);
#endif
-int cbmem_initialize(void);
-
void cbmem_init(u64 baseaddr, u64 size);
int cbmem_reinit(u64 baseaddr);
+
+extern struct cbmem_entry *get_cbmem_toc(void);
+
+#endif /* CONFIG_DYNAMIC_CBMEM */
+
+/* Common API between cbmem and dynamic cbmem. */
+
+/* By default cbmem is attempted to be recovered. Returns 0 if cbmem was
+ * recovered or 1 if cbmem had to be reinitialized. */
+int cbmem_initialize(void);
/* Add a cbmem entry of a given size and id. These return NULL on failure. The
 * add function performs a find first and does not check against the original
 * size. */
void *cbmem_add(u32 id, u64 size);
+/* Find a cbmem entry of a given id. These return NULL on failure. */
void *cbmem_find(u32 id);
+
+#ifndef __PRE_RAM__
+/* Ramstage only functions. */
void cbmem_list(void);
void cbmem_arch_init(void);
+void __attribute__((weak)) cbmem_post_handling(void);
+void cbmem_print_entry(int n, u32 id, u64 start, u64 size);
+/* The pre|post device cbmem initialization functions are for the
+ * ramstage main to call. When cbmem is actually initialized depends on
+ * the cbmem implementation. */
+void init_cbmem_pre_device(void);
+void init_cbmem_post_device(void);
+#else
+static inline void cbmem_arch_init(void) {}
+#endif /* __PRE_RAM__ */
-extern struct cbmem_entry *get_cbmem_toc(void);
+#endif /* __ASSEMBLER__ */
-#ifndef __PRE_RAM__
-void set_cbmem_toc(struct cbmem_entry *);
-void __attribute__((weak)) cbmem_post_handling(void);
-#endif
-#endif
-#endif
+
+#endif /* _CBMEM_H_ */
diff --git a/src/lib/Makefile.inc b/src/lib/Makefile.inc
index fe57f9f..c0372c5 100644
--- a/src/lib/Makefile.inc
+++ b/src/lib/Makefile.inc
@@ -42,7 +42,6 @@ romstage-y += cbfs.c
romstage-y += lzma.c
#romstage-y += lzmadecode.c
romstage-$(CONFIG_CACHE_AS_RAM) += ramtest.c
-romstage-$(CONFIG_HAVE_ACPI_RESUME) += cbmem.c
romstage-$(CONFIG_CONSOLE_SERIAL8250) += uart8250.c
romstage-$(CONFIG_CONSOLE_SERIAL8250MEM) += uart8250mem.c
romstage-$(CONFIG_CONSOLE_CBMEM) += cbmem_console.c
@@ -76,7 +75,6 @@ ramstage-y += lzma.c
ramstage-y += stack.c
ramstage-$(CONFIG_ARCH_X86) += gcc.c
ramstage-y += clog2.c
-ramstage-y += cbmem.c
ramstage-$(CONFIG_CONSOLE_SERIAL8250) += uart8250.c
ramstage-$(CONFIG_CONSOLE_SERIAL8250MEM) += uart8250mem.c
ramstage-$(CONFIG_CONSOLE_CBMEM) += cbmem_console.c
@@ -86,6 +84,16 @@ ramstage-$(CONFIG_TRACE) += trace.c
ramstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
ramstage-$(CONFIG_COVERAGE) += libgcov.c
+# The CBMEM implementations are chosen based on CONFIG_DYNAMIC_CBMEM.
+ifeq ($(CONFIG_DYNAMIC_CBMEM),y)
+ramstage-y += dynamic_cbmem.c
+romstage-y += dynamic_cbmem.c
+else
+ramstage-y += cbmem.c
+romstage-$(CONFIG_HAVE_ACPI_RESUME) += cbmem.c
+endif # CONFIG_DYNAMIC_CBMEM
+ramstage-y += cbmem_info.c
+
ramstage-$(CONFIG_CONSOLE_NE2K) += ne2k.c
ifneq ($(CONFIG_HAVE_ARCH_MEMSET),y)
diff --git a/src/lib/cbmem.c b/src/lib/cbmem.c
index 5663cc8..f4fd3b6 100644
--- a/src/lib/cbmem.c
+++ b/src/lib/cbmem.c
@@ -232,6 +232,20 @@ int cbmem_initialize(void)
#endif
#ifndef __PRE_RAM__
+/* cbmem cannot be initialized before device drivers, but it can be initialized
+ * after the drivers have run when CONFIG_WRITE_HIGH_TABLES is enabled. */
+void init_cbmem_pre_device(void) {}
+
+void init_cbmem_post_device(void)
+{
+#if CONFIG_WRITE_HIGH_TABLES
+ cbmem_initialize();
+#if CONFIG_CONSOLE_CBMEM
+ cbmemc_reinit();
+#endif
+#endif
+}
+
void cbmem_list(void)
{
struct cbmem_entry *cbmem_toc;
@@ -245,28 +259,8 @@ void cbmem_list(void)
if (cbmem_toc[i].magic != CBMEM_MAGIC)
continue;
- printk(BIOS_DEBUG, "%2d. ", i);
- switch (cbmem_toc[i].id) {
- case CBMEM_ID_FREESPACE: printk(BIOS_DEBUG, "FREE SPACE "); break;
- case CBMEM_ID_GDT: printk(BIOS_DEBUG, "GDT "); break;
- case CBMEM_ID_ACPI: printk(BIOS_DEBUG, "ACPI "); break;
- case CBMEM_ID_CBTABLE: printk(BIOS_DEBUG, "COREBOOT "); break;
- case CBMEM_ID_PIRQ: printk(BIOS_DEBUG, "IRQ TABLE "); break;
- case CBMEM_ID_MPTABLE: printk(BIOS_DEBUG, "SMP TABLE "); break;
- case CBMEM_ID_RESUME: printk(BIOS_DEBUG, "ACPI RESUME"); break;
- case CBMEM_ID_RESUME_SCRATCH: printk(BIOS_DEBUG, "ACPISCRATCH"); break;
- case CBMEM_ID_ACPI_GNVS: printk(BIOS_DEBUG, "ACPI GNVS "); break;
- case CBMEM_ID_SMBIOS: printk(BIOS_DEBUG, "SMBIOS "); break;
- case CBMEM_ID_TIMESTAMP: printk(BIOS_DEBUG, "TIME STAMP "); break;
- case CBMEM_ID_MRCDATA: printk(BIOS_DEBUG, "MRC DATA "); break;
- case CBMEM_ID_CONSOLE: printk(BIOS_DEBUG, "CONSOLE "); break;
- case CBMEM_ID_ELOG: printk(BIOS_DEBUG, "ELOG "); break;
- case CBMEM_ID_COVERAGE: printk(BIOS_DEBUG, "COVERAGE "); break;
- case CBMEM_ID_ROMSTAGE_INFO: printk(BIOS_DEBUG, "ROMSTAGE "); break;
- default: printk(BIOS_DEBUG, "%08x ", cbmem_toc[i].id);
- }
- printk(BIOS_DEBUG, "%08llx ", cbmem_toc[i].base);
- printk(BIOS_DEBUG, "%08llx\n", cbmem_toc[i].size);
+ cbmem_print_entry(i, cbmem_toc[i].id, cbmem_toc[i].base,
+ cbmem_toc[i].size);
}
}
#endif
diff --git a/src/lib/cbmem_info.c b/src/lib/cbmem_info.c
new file mode 100644
index 0000000..aaf5840
--- /dev/null
+++ b/src/lib/cbmem_info.c
@@ -0,0 +1,69 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <console/console.h>
+#include <cbmem.h>
+#include <stdlib.h>
+
+static struct cbmem_id_to_name {
+ u32 id;
+ const char *name;
+} cbmem_ids[] = {
+ { CBMEM_ID_FREESPACE, "FREE SPACE " },
+ { CBMEM_ID_GDT, "GDT " },
+ { CBMEM_ID_ACPI, "ACPI " },
+ { CBMEM_ID_CBTABLE, "COREBOOT " },
+ { CBMEM_ID_PIRQ, "IRQ TABLE " },
+ { CBMEM_ID_MPTABLE, "SMP TABLE " },
+ { CBMEM_ID_RESUME, "ACPI RESUME" },
+ { CBMEM_ID_RESUME_SCRATCH, "ACPISCRATCH" },
+ { CBMEM_ID_ACPI_GNVS, "ACPI GNVS " },
+ { CBMEM_ID_SMBIOS, "SMBIOS " },
+ { CBMEM_ID_TIMESTAMP, "TIME STAMP " },
+ { CBMEM_ID_MRCDATA, "MRC DATA " },
+ { CBMEM_ID_CONSOLE, "CONSOLE " },
+ { CBMEM_ID_ELOG, "ELOG " },
+ { CBMEM_ID_COVERAGE, "COVERAGE " },
+ { CBMEM_ID_ROMSTAGE_INFO, "ROMSTAGE " },
+ { CBMEM_ID_ROMSTAGE_RAM_STACK, "ROMSTG STCK" },
+ { CBMEM_ID_RAMSTAGE, "RAMSTAGE " },
+ { CBMEM_ID_RAMSTAGE_CACHE, "RAMSTAGE $ " },
+ { CBMEM_ID_ROOT, "CBMEM ROOT " },
+};
+
+void cbmem_print_entry(int n, u32 id, u64 base, u64 size)
+{
+ int i;
+ const char *name;
+
+ name = NULL;
+ for (i = 0; i < ARRAY_SIZE(cbmem_ids); i++) {
+ if (cbmem_ids[i].id == id) {
+ name = cbmem_ids[i].name;
+ break;
+ }
+ }
+
+ printk(BIOS_DEBUG, "%2d. ", n);
+ if (name == NULL)
+ printk(BIOS_DEBUG, "%08x ", id);
+ else
+ printk(BIOS_DEBUG, "%s", name);
+ printk(BIOS_DEBUG, "%08llx ", base);
+ printk(BIOS_DEBUG, "%08llx\n", size);
+}
diff --git a/src/lib/dynamic_cbmem.c b/src/lib/dynamic_cbmem.c
new file mode 100644
index 0000000..ae6c87a
--- /dev/null
+++ b/src/lib/dynamic_cbmem.c
@@ -0,0 +1,452 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <boot/tables.h>
+#include <console/console.h>
+#include <cbmem.h>
+#include <string.h>
+#include <stdlib.h>
+#if CONFIG_HAVE_ACPI_RESUME && !defined(__PRE_RAM__)
+#include <arch/acpi.h>
+#endif
+
+#ifndef UINT_MAX
+#define UINT_MAX 4294967295U
+#endif
+
+/* ACPI resume needs to be cleared in the fail-to-recover case, but that
+ * condition is only handled during ramstage. */
+#if CONFIG_HAVE_ACPI_RESUME && !defined(__PRE_RAM__)
+static inline void cbmem_handle_acpi_resume(void)
+{
+ /* Something went wrong, our high memory area got wiped */
+ if (acpi_slp_type == 3 || acpi_slp_type == 2)
+ acpi_slp_type = 0;
+}
+#else
+static inline void cbmem_handle_acpi_resume(void) {}
+#endif
+
+/*
+ * The dynamic cbmem code uses a root region. The root region boundary
+ * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
+ * the address returned by cbmem_top() is a pointer that points to the
+ * root data structure. The root data structure provides the book keeping
+ * for each large entry.
+ */
+
+/* The root region is at least DYN_CBMEM_ALIGN_SIZE in size. */
+#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
+#define CBMEM_POINTER_MAGIC 0xc0389479
+#define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)
+
+/* The cbmem_root_pointer structure lives just below address returned
+ * from cbmem_top(). It points to the root data structure that
+ * maintains the entries. */
+struct cbmem_root_pointer {
+ u32 magic;
+ u32 root;
+} __attribute__((packed));
+
+struct cbmem_entry {
+ u32 magic;
+ u32 start;
+ u32 size;
+ u32 id;
+} __attribute__((packed));
+
+struct cbmem_root {
+ u32 max_entries;
+ u32 num_entries;
+ u32 locked;
+ u32 size;
+ struct cbmem_entry entries[0];
+} __attribute__((packed));
+
+
+static inline void *cbmem_top_cached(void)
+{
+#if !defined(__PRE_RAM__)
+ static void *cached_cbmem_top;
+
+ if (cached_cbmem_top == NULL)
+ cached_cbmem_top = cbmem_top();
+
+ return cached_cbmem_top;
+#else
+ return cbmem_top();
+#endif
+}
+
+static inline void *get_top_aligned(void)
+{
+ unsigned long top;
+
+ /* Align down what is returned from cbmem_top(). */
+ top = (unsigned long)cbmem_top_cached();
+ top &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
+
+ return (void *)top;
+}
+
+static inline void *get_root(void)
+{
+ unsigned long pointer_addr;
+ struct cbmem_root_pointer *pointer;
+
+ pointer_addr = (unsigned long)get_top_aligned();
+ pointer_addr -= sizeof(struct cbmem_root_pointer);
+
+ pointer = (void *)pointer_addr;
+ if (pointer->magic != CBMEM_POINTER_MAGIC)
+ return NULL;
+
+ return (void *)pointer->root;
+}
+
+static inline void cbmem_entry_assign(struct cbmem_entry *entry,
+ u32 id, u32 start, u32 size)
+{
+ entry->magic = CBMEM_ENTRY_MAGIC;
+ entry->start = start;
+ entry->size = size;
+ entry->id = id;
+}
+
+static inline const struct cbmem_entry *
+cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size)
+{
+ struct cbmem_entry *cbmem_entry;
+
+ cbmem_entry = &root->entries[root->num_entries];
+ root->num_entries++;
+
+ cbmem_entry_assign(cbmem_entry, id, start, size);
+
+ return cbmem_entry;
+}
+
+void cbmem_initialize_empty(void)
+{
+ unsigned long pointer_addr;
+ unsigned long root_addr;
+ unsigned long max_entries;
+ struct cbmem_root *root;
+ struct cbmem_root_pointer *pointer;
+
+ /* Place the root pointer and the root. The number of entries is
+ * dictated by difference between the root address and the pointer
+ * where the root address is aligned down to
+ * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
+ * address returned by get_top_aligned(). */
+ pointer_addr = (unsigned long)get_top_aligned();
+ root_addr = pointer_addr - ROOT_MIN_SIZE;
+ root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
+ pointer_addr -= sizeof(struct cbmem_root_pointer);
+
+ max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
+ sizeof(struct cbmem_entry);
+
+ pointer = (void *)pointer_addr;
+ pointer->magic = CBMEM_POINTER_MAGIC;
+ pointer->root = root_addr;
+
+ root = (void *)root_addr;
+ root->max_entries = max_entries;
+ root->num_entries = 0;
+ root->locked = 0;
+ root->size = pointer_addr - root_addr +
+ sizeof(struct cbmem_root_pointer);
+
+ /* Add an entry covering the root region. */
+ cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);
+
+ printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
+ root, root->max_entries);
+
+ cbmem_arch_init();
+}
+
+static inline int cbmem_fail_recovery(void)
+{
+ cbmem_initialize_empty();
+ cbmem_handle_acpi_resume();
+ return 1;
+}
+
+static int validate_entries(struct cbmem_root *root)
+{
+ unsigned int i;
+ u32 current_end;
+
+ current_end = (u32)get_top_aligned();
+
+ printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n",
+ root->num_entries, root->max_entries, root);
+
+ /* Check that all regions are properly aligned and are just below
+ * the previous entry */
+ for (i = 0; i < root->num_entries; i++) {
+ struct cbmem_entry *entry = &root->entries[i];
+
+ if (entry->magic != CBMEM_ENTRY_MAGIC)
+ return -1;
+
+ if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1))
+ return -1;
+
+ if (entry->start + entry->size != current_end)
+ return -1;
+
+ current_end = entry->start;
+ }
+
+ return 0;
+}
+
+int cbmem_initialize(void)
+{
+ struct cbmem_root *root;
+ void *top_according_to_root;
+
+ root = get_root();
+
+ /* No recovery possible since root couldn't be recovered. */
+ if (root == NULL)
+ return cbmem_fail_recovery();
+
+ /* Sanity check the root. */
+ top_according_to_root = (void *)(root->size + (unsigned long)root);
+ if (get_top_aligned() != top_according_to_root)
+ return cbmem_fail_recovery();
+
+ if (root->num_entries > root->max_entries)
+ return cbmem_fail_recovery();
+
+ if ((root->max_entries * sizeof(struct cbmem_entry)) >
+ (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
+ return cbmem_fail_recovery();
+
+ /* Validate current entries. */
+ if (validate_entries(root))
+ return cbmem_fail_recovery();
+
+#if defined(__PRE_RAM__)
+ /* Lock the root in the romstage on a recovery. The assumption is that
+ * recovery is called during romstage on the S3 resume path. */
+ root->locked = 1;
+#endif
+
+ cbmem_arch_init();
+
+ /* Recovery successful. */
+ return 0;
+}
+
+static void *cbmem_base(void)
+{
+ struct cbmem_root *root;
+ u32 low_addr;
+
+ root = get_root();
+
+ if (root == NULL)
+ return NULL;
+
+ low_addr = (u32)root;
+
+ /* Assume the lowest address is the last one added. */
+ if (root->num_entries > 0) {
+ low_addr = root->entries[root->num_entries - 1].start;
+ }
+
+ return (void *)low_addr;
+}
+
+
+const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
+{
+ struct cbmem_root *root;
+ const struct cbmem_entry *entry;
+ unsigned long base;
+ u32 size;
+ u32 aligned_size;
+
+ entry = cbmem_entry_find(id);
+
+ if (entry != NULL)
+ return entry;
+
+ /* Only handle sizes <= UINT_MAX internally. */
+ if (size64 > (u64)UINT_MAX)
+ return NULL;
+
+ size = size64;
+
+ root = get_root();
+
+ if (root == NULL)
+ return NULL;
+
+ /* Nothing can be added once it is locked down. */
+ if (root->locked)
+ return NULL;
+
+ if (root->max_entries == root->num_entries)
+ return NULL;
+
+ aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE);
+ base = (unsigned long)cbmem_base();
+ base -= aligned_size;
+
+ return cbmem_entry_append(root, id, base, aligned_size);
+}
+
+void *cbmem_add(u32 id, u64 size)
+{
+ const struct cbmem_entry *entry;
+
+ entry = cbmem_entry_add(id, size);
+
+ if (entry == NULL)
+ return NULL;
+
+ return cbmem_entry_start(entry);
+}
+
+/* Retrieve a region provided a given id. */
+const struct cbmem_entry *cbmem_entry_find(u32 id)
+{
+ struct cbmem_root *root;
+ const struct cbmem_entry *entry;
+ unsigned int i;
+
+ root = get_root();
+
+ if (root == NULL)
+ return NULL;
+
+ entry = NULL;
+
+ for (i = 0; i < root->num_entries; i++) {
+ if (root->entries[i].id == id) {
+ entry = &root->entries[i];
+ break;
+ }
+ }
+
+ return entry;
+}
+
+void *cbmem_find(u32 id)
+{
+ const struct cbmem_entry *entry;
+
+ entry = cbmem_entry_find(id);
+
+ if (entry == NULL)
+ return NULL;
+
+ return cbmem_entry_start(entry);
+}
+
+/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
+ * cannot be removed unless it was the last one added. */
+int cbmem_entry_remove(const struct cbmem_entry *entry)
+{
+ unsigned long entry_num;
+ struct cbmem_root *root;
+
+ root = get_root();
+
+ if (root == NULL)
+ return -1;
+
+ if (root->num_entries == 0)
+ return -1;
+
+ /* Nothing can be removed. */
+ if (root->locked)
+ return -1;
+
+ entry_num = entry - &root->entries[0];
+
+ /* If the entry is the last one in the root it can be removed. */
+ if (entry_num == (root->num_entries - 1)) {
+ root->num_entries--;
+ return 0;
+ }
+
+ return -1;
+}
+
+u64 cbmem_entry_size(const struct cbmem_entry *entry)
+{
+ return entry->size;
+}
+
+void *cbmem_entry_start(const struct cbmem_entry *entry)
+{
+ return (void *)entry->start;
+}
+
+
+#if !defined(__PRE_RAM__)
+/* selected cbmem can be initialized early in ramstage. Additionally, that
+ * means cbmem console can be reinitialized early as well. The post_device
+ * function is empty since cbmem was initialized early in ramstage. */
+void init_cbmem_pre_device(void)
+{
+ cbmem_initialize();
+#if CONFIG_CONSOLE_CBMEM
+ cbmemc_reinit();
+#endif /* CONFIG_CONSOLE_CBMEM */
+}
+
+void init_cbmem_post_device(void) {}
+
+void cbmem_add_lb_mem(struct lb_memory *mem)
+{
+ unsigned long base;
+ unsigned long top;
+
+ base = (unsigned long)cbmem_base();
+ top = (unsigned long)get_top_aligned();
+ lb_add_memory_range(mem, LB_MEM_TABLE, base, top - base);
+}
+
+void cbmem_list(void)
+{
+ unsigned int i;
+ struct cbmem_root *root;
+
+ root = get_root();
+
+ if (root == NULL)
+ return;
+
+ for (i = 0; i < root->num_entries; i++) {
+ struct cbmem_entry *entry;
+
+ entry = &root->entries[i];
+
+ cbmem_print_entry(i, entry->id, entry->start, entry->size);
+ }
+}
+#endif /* __PRE_RAM__ */
diff --git a/src/lib/hardwaremain.c b/src/lib/hardwaremain.c
index b29cc93..bc18989 100644
--- a/src/lib/hardwaremain.c
+++ b/src/lib/hardwaremain.c
@@ -94,6 +94,7 @@ void hardwaremain(int boot_complete)
!cbmem_get_table_location(&high_tables_base, &high_tables_size))
cbmem_initialize();
#endif
+ init_cbmem_pre_device();
timestamp_stash(TS_DEVICE_ENUMERATE);
@@ -121,10 +122,8 @@ void hardwaremain(int boot_complete)
timestamp_stash(TS_DEVICE_DONE);
- cbmem_initialize();
-#if CONFIG_CONSOLE_CBMEM
- cbmemc_reinit();
-#endif
+ init_cbmem_post_device();
+
timestamp_sync();
#if CONFIG_HAVE_ACPI_RESUME
Stefan Reinauer (stefan.reinauer(a)coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2847
-gerrit
commit 715c6e8a6b6b8e158b8ac5635800af31163c3205
Author: Duncan Laurie <dlaurie(a)chromium.org>
Date: Thu Mar 7 14:20:13 2013 -0800
haswell: drop memory reservation for sandybridge GPU bug
This is not needed in haswell.
Change-Id: I23817c2e01be33855f9d5a5e389e8ccb7954c0e2
Signed-off-by: Duncan Laurie <dlaurie(a)chromium.org>
---
src/northbridge/intel/haswell/acpi/haswell.asl | 5 -----
1 file changed, 5 deletions(-)
diff --git a/src/northbridge/intel/haswell/acpi/haswell.asl b/src/northbridge/intel/haswell/acpi/haswell.asl
index 49d55e7..1634fe3 100644
--- a/src/northbridge/intel/haswell/acpi/haswell.asl
+++ b/src/northbridge/intel/haswell/acpi/haswell.asl
@@ -42,11 +42,6 @@ Device (PDRC)
Memory32Fixed(ReadWrite, CONFIG_CHROMEOS_RAMOOPS_RAM_START,
CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE)
#endif
-
- /* Required for SandyBridge sighting 3715511 */
- /* FIXME: Is this still required? */
- Memory32Fixed(ReadWrite, 0x20000000, 0x00200000)
- Memory32Fixed(ReadWrite, 0x40000000, 0x00200000)
})
// Current Resource Settings