Leroy P Leahy (leroy.p.leahy@intel.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/10148
-gerrit
commit 08147cedaec6a7fa3ac77aa31f5858783a993a5c Author: Lee Leahy leroy.p.leahy@intel.com Date: Fri May 8 11:33:55 2015 -0700
cbmem: Add initial allocation support
Add support to allocate a region just below CBMEM root. This region is reserved for FSP 1.1 to use for its stack and variables.
BRANCH=none BUG=None TEST=Build and run on Braswell
Change-Id: I1d4b36ab366e6f8e036335c56c1756f2dfaab3f5 Signed-off-by: Lee Leahy leroy.p.leahy@intel.com --- src/include/cbmem.h | 12 ++++++++++++ src/lib/imd_cbmem.c | 29 ++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/src/include/cbmem.h b/src/include/cbmem.h index 07e5645..f4dbf87 100644 --- a/src/include/cbmem.h +++ b/src/include/cbmem.h @@ -146,12 +146,24 @@ struct cbmem_entry; */
#define DYN_CBMEM_ALIGN_SIZE (4096) +#define CBMEM_ROOT_SIZE DYN_CBMEM_ALIGN_SIZE + +/* The root region is at least DYN_CBMEM_ALIGN_SIZE. */ +#define CBMEM_ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE +#define CBMEM_LG_ALIGN CBMEM_ROOT_MIN_SIZE + +/* Small allocation parameters. */ +#define CBMEM_SM_ROOT_SIZE 1024 +#define CBMEM_SM_ALIGN 32
/* By default cbmem is attempted to be recovered. Returns 0 if cbmem was * recovered or 1 if cbmem had to be reinitialized. */ int cbmem_initialize(void); +int cbmem_initialize_id_size(u32 id, u64 size); + /* Initialize cbmem to be empty. */ void cbmem_initialize_empty(void); +void cbmem_initialize_empty_id_size(u32 id, u64 size);
/* Return the top address for dynamic cbmem. The address returned needs to * be consistent across romstage and ramstage, and it is required to be diff --git a/src/lib/imd_cbmem.c b/src/lib/imd_cbmem.c index 0649bf3..fc12c25 100644 --- a/src/lib/imd_cbmem.c +++ b/src/lib/imd_cbmem.c @@ -30,13 +30,6 @@ #include <arch/acpi.h> #endif
-/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */ -#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE -#define LG_ALIGN ROOT_MIN_SIZE -/* Small allocation parameters. */ -#define SM_ROOT_SIZE 1024 -#define SM_ALIGN 32 - static inline struct imd *cbmem_get_imd(void) { /* Only supply a backing store for imd in ramstage. */ @@ -116,6 +109,11 @@ static struct imd *imd_init_backing_with_recover(struct imd *backing)
void cbmem_initialize_empty(void) { + cbmem_initialize_empty_id_size(0, 0); +} + +void cbmem_initialize_empty_id_size(u32 id, u64 size) +{ struct imd *imd; struct imd imd_backing;
@@ -127,12 +125,16 @@ void cbmem_initialize_empty(void)
printk(BIOS_DEBUG, "CBMEM:\n");
- if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN, - SM_ROOT_SIZE, SM_ALIGN)) { + if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN, + CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) { printk(BIOS_DEBUG, "failed.\n"); return; }
+ /* Add the specified range first */ + if (size) + cbmem_add(id, size); + /* Complete migration to CBMEM. */ cbmem_run_init_hooks(); } @@ -146,6 +148,11 @@ static inline int cbmem_fail_recovery(void)
int cbmem_initialize(void) { + return cbmem_initialize_id_size(0, 0); +} + +int cbmem_initialize_id_size(u32 id, u64 size) +{ struct imd *imd; struct imd imd_backing;
@@ -167,6 +174,10 @@ int cbmem_initialize(void) imd_lockdown(imd); #endif
+ /* Add the specified range first */ + if (size) + cbmem_add(id, size); + /* Complete migration to CBMEM. */ cbmem_run_init_hooks();