Aaron Durbin (adurbin@google.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/8622
-gerrit
commit 98af52d76f5c90b539cde03a293b901a32ea0384
Author: Aaron Durbin <adurbin@chromium.org>
Date:   Thu Mar 5 21:18:33 2015 -0600

    coreboot: use imd library for cbmem

    Utilize the newly introduced imd library for the guts of the dynamic cbmem code.

    Change-Id: I47bb71d95895bd5cbbf27a8c2a47ce94f4f4a702
    Signed-off-by: Aaron Durbin <adurbin@chromium.org>
---
 src/include/cbmem.h     |  12 --
 src/lib/cbmem_common.c  |  28 ----
 src/lib/dynamic_cbmem.c | 330 +++++++-----------------------------------------
 3 files changed, 48 insertions(+), 322 deletions(-)
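For reviewers who want the consumer-side context: the public cbmem API implemented here keeps its existing shape; only the bookkeeping underneath moves into the imd library. A minimal usage sketch of that unchanged API follows (the id constant and function name are placeholders invented for illustration, not part of this patch or of coreboot's id table):

#include <cbmem.h>
#include <console/console.h>

/* Placeholder id for illustration only -- not a real coreboot cbmem id. */
#define CBMEM_ID_EXAMPLE 0x4558414d

static void example_cbmem_usage(void)
{
	/* Find an existing region with this id, or allocate a 4 KiB one. */
	void *buf = cbmem_add(CBMEM_ID_EXAMPLE, 4096);

	if (buf == NULL) {
		printk(BIOS_ERR, "CBMEM allocation failed\n");
		return;
	}

	/* Later lookups by id resolve to the same region. */
	if (cbmem_find(CBMEM_ID_EXAMPLE) != buf)
		printk(BIOS_ERR, "CBMEM lookup mismatch\n");
}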
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 2f86b85..a8c96ee 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -80,11 +80,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-struct cbmem_id_to_name {
-	u32 id;
-	const char *name;
-};
-
 #define CBMEM_ID_TO_NAME_TABLE \
 	{ CBMEM_ID_FREESPACE, "FREE SPACE " }, \
 	{ CBMEM_ID_GDT, "GDT " }, \
@@ -128,13 +123,6 @@ struct cbmem_entry;
  * dynamic cbmem infrastructure allocates new regions below the last allocated
  * region. Regions are defined by a cbmem_entry struct that is opaque. Regions
  * may be removed, but the last one added is the only that can be removed.
- *
- * Dynamic cbmem has two allocators within it. All allocators use a top down
- * allocation scheme. However, there are 2 modes for each allocation depending
- * on the requested size. There are large allocations and small allocations.
- * An allocation is considered to be small when it is less than or equal to
- * DYN_CBMEM_ALIGN_SIZE / 2. The smaller allocations are fit into a larger
- * allocation region.
  */
 
 #define DYN_CBMEM_ALIGN_SIZE (4096)
diff --git a/src/lib/cbmem_common.c b/src/lib/cbmem_common.c
index c3e8383..ea2d63d 100644
--- a/src/lib/cbmem_common.c
+++ b/src/lib/cbmem_common.c
@@ -24,34 +24,6 @@
 #include <console/cbmem_console.h>
 #include <timestamp.h>
 
-#ifndef __PRE_RAM__
-
-static const struct cbmem_id_to_name cbmem_ids[] = { CBMEM_ID_TO_NAME_TABLE };
-
-void cbmem_print_entry(int n, u32 id, u64 base, u64 size)
-{
-	int i;
-	const char *name;
-
-	name = NULL;
-	for (i = 0; i < ARRAY_SIZE(cbmem_ids); i++) {
-		if (cbmem_ids[i].id == id) {
-			name = cbmem_ids[i].name;
-			break;
-		}
-	}
-
-	if (name == NULL)
-		printk(BIOS_DEBUG, "%08x ", id);
-	else
-		printk(BIOS_DEBUG, "%s", name);
-	printk(BIOS_DEBUG, "%2d. ", n);
-	printk(BIOS_DEBUG, "%08llx ", base);
-	printk(BIOS_DEBUG, "%08llx\n", size);
-}
-
-#endif /* !__PRE_RAM__ */
-
 /* FIXME: Replace with CBMEM_INIT_HOOKS API. */
 #if !IS_ENABLED(CONFIG_ARCH_X86)
 void cbmem_run_init_hooks(void)
diff --git a/src/lib/dynamic_cbmem.c b/src/lib/dynamic_cbmem.c
index daa3717..529c589 100644
--- a/src/lib/dynamic_cbmem.c
+++ b/src/lib/dynamic_cbmem.c
@@ -21,52 +21,16 @@
 #include <bootmem.h>
 #include <console/console.h>
 #include <cbmem.h>
+#include <imd.h>
 #include <string.h>
 #include <stdlib.h>
 #include <arch/early_variables.h>
 #if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
 #include <arch/acpi.h>
 #endif
 
-#ifndef UINT_MAX
-#define UINT_MAX 4294967295U
-#endif
-
-/*
- * The dynamic cbmem code uses a root region. The root region boundary
- * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
- * the address returned by cbmem_top() is a pointer that points to the
- * root data structure. The root data structure provides the book keeping
- * for each large entry.
- */
-
 /* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
 #define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
-#define CBMEM_POINTER_MAGIC 0xc0389479
-#define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)
-
-/* The cbmem_root_pointer structure lives just below address returned
- * from cbmem_top(). It points to the root data structure that
- * maintains the entries. */
-struct cbmem_root_pointer {
-	u32 magic;
-	u32 root;
-} __attribute__((packed));
-
-struct cbmem_entry {
-	u32 magic;
-	u32 start;
-	u32 size;
-	u32 id;
-} __attribute__((packed));
-
-struct cbmem_root {
-	u32 max_entries;
-	u32 num_entries;
-	u32 locked;
-	u32 size;
-	struct cbmem_entry entries[0];
-} __attribute__((packed));
-
 
 #if !defined(__PRE_RAM__)
 static void *cached_cbmem_top;
@@ -89,98 +53,35 @@ static inline void *cbmem_top_cached(void)
 #endif
 }
 
-static inline uintptr_t get_top_aligned(void)
-{
-	uintptr_t top;
+static struct imd imd_cbmem CAR_GLOBAL = { };
 
-	/* Align down what is returned from cbmem_top(). */
-	top = (uintptr_t)cbmem_top_cached();
-	top &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
-
-	return top;
-}
-
-static inline void *get_root(void)
+static inline struct imd *cbmem_get_imd(void)
 {
-	uintptr_t pointer_addr;
-	struct cbmem_root_pointer *pointer;
-
-	pointer_addr = get_top_aligned();
-	if (pointer_addr == 0)
-		return NULL;
-
-	pointer_addr -= sizeof(struct cbmem_root_pointer);
-
-	pointer = (void *)pointer_addr;
-	if (pointer->magic != CBMEM_POINTER_MAGIC)
-		return NULL;
-
-	pointer_addr = pointer->root;
-	return (void *)pointer_addr;
+	return car_get_var_ptr(&imd_cbmem);
 }
 
-static inline void cbmem_entry_assign(struct cbmem_entry *entry,
-					u32 id, u32 start, u32 size)
+static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
 {
-	entry->magic = CBMEM_ENTRY_MAGIC;
-	entry->start = start;
-	entry->size = size;
-	entry->id = id;
+	return (const struct cbmem_entry *)e;
 }
 
-static inline const struct cbmem_entry *
-cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size)
+static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
 {
-	struct cbmem_entry *cbmem_entry;
-
-	cbmem_entry = &root->entries[root->num_entries];
-	root->num_entries++;
-
-	cbmem_entry_assign(cbmem_entry, id, start, size);
-
-	return cbmem_entry;
+	return (const struct imd_entry *)e;
 }
 
 void cbmem_initialize_empty(void)
 {
-	uintptr_t pointer_addr;
-	uintptr_t root_addr;
-	unsigned long max_entries;
-	struct cbmem_root *root;
-	struct cbmem_root_pointer *pointer;
-
-	/* Place the root pointer and the root. The number of entries is
-	 * dictated by difference between the root address and the pointer
-	 * where the root address is aligned down to
-	 * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
-	 * address returned by get_top_aligned(). */
-	pointer_addr = get_top_aligned();
-	if (pointer_addr == 0)
-		return;
-
-	root_addr = pointer_addr - ROOT_MIN_SIZE;
-	root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
-	pointer_addr -= sizeof(struct cbmem_root_pointer);
-
-	max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
-		sizeof(struct cbmem_entry);
+	struct imd *imd;
 
-	pointer = (void *)pointer_addr;
-	pointer->magic = CBMEM_POINTER_MAGIC;
-	pointer->root = root_addr;
+	imd = cbmem_get_imd();
 
-	root = (void *)root_addr;
-	root->max_entries = max_entries;
-	root->num_entries = 0;
-	root->locked = 0;
-	root->size = pointer_addr - root_addr +
-		sizeof(struct cbmem_root_pointer);
+	imd_handle_init(imd, cbmem_top_cached());
 
-	/* Add an entry covering the root region. */
-	cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);
+	printk(BIOS_DEBUG, "CBMEM: ");
 
-	printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
-			root, root->max_entries);
+	if (imd_create_empty(imd, DYN_CBMEM_ALIGN_SIZE, DYN_CBMEM_ALIGN_SIZE))
+		return;
 
 	/* Complete migration to CBMEM. */
 	cbmem_run_init_hooks();
@@ -193,67 +94,24 @@ static inline int cbmem_fail_recovery(void)
 	return 1;
 }
 
-static int validate_entries(struct cbmem_root *root)
-{
-	unsigned int i;
-	uintptr_t current_end;
-
-	current_end = get_top_aligned();
-
-	printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n",
-		root->num_entries, root->max_entries, root);
-
-	/* Check that all regions are properly aligned and are just below
-	 * the previous entry */
-	for (i = 0; i < root->num_entries; i++) {
-		struct cbmem_entry *entry = &root->entries[i];
-
-		if (entry->magic != CBMEM_ENTRY_MAGIC)
-			return -1;
-
-		if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1))
-			return -1;
-
-		if (entry->start + entry->size != current_end)
-			return -1;
-
-		current_end = entry->start;
-	}
-
-	return 0;
-}
-
 int cbmem_initialize(void)
 {
-	struct cbmem_root *root;
-	uintptr_t top_according_to_root;
-
-	root = get_root();
-
-	/* No recovery possible since root couldn't be recovered. */
-	if (root == NULL)
-		return cbmem_fail_recovery();
+	struct imd *imd;
 
-	/* Sanity check the root. */
-	top_according_to_root = (root->size + (uintptr_t)root);
-	if (get_top_aligned() != top_according_to_root)
-		return cbmem_fail_recovery();
+	imd = cbmem_get_imd();
 
-	if (root->num_entries > root->max_entries)
-		return cbmem_fail_recovery();
-
-	if ((root->max_entries * sizeof(struct cbmem_entry)) >
-	    (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
-		return cbmem_fail_recovery();
+	imd_handle_init(imd, cbmem_top_cached());
 
-	/* Validate current entries. */
-	if (validate_entries(root))
+	if (imd_recover(imd) != 0)
 		return cbmem_fail_recovery();
 
 #if defined(__PRE_RAM__)
-	/* Lock the root in the romstage on a recovery. The assumption is that
-	 * recovery is called during romstage on the S3 resume path. */
-	root->locked = 1;
+	/*
+	 * Lock the imd in romstage on a recovery. The assumption is that
+	 * if the imd area was recovered in romstage then S3 resume path
	 * is being taken.
+	 */
+	imd_lockdown(imd);
 #endif
 
 	/* Complete migration to CBMEM. */
@@ -273,65 +131,16 @@ int cbmem_recovery(int is_wakeup)
 	return rv;
 }
 
-static uintptr_t cbmem_base(void)
-{
-	struct cbmem_root *root;
-	uintptr_t low_addr;
-
-	root = get_root();
-
-	if (root == NULL)
-		return 0;
-
-	low_addr = (uintptr_t)root;
-	/* a low address is low. */
-	low_addr &= 0xffffffff;
-
-	/* Assume the lowest address is the last one added. */
-	if (root->num_entries > 0) {
-		low_addr = root->entries[root->num_entries - 1].start;
-	}
-
-	return low_addr;
-}
-
-
 const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
 {
-	struct cbmem_root *root;
-	const struct cbmem_entry *entry;
-	uintptr_t base;
-	u32 size;
-	u32 aligned_size;
-
-	entry = cbmem_entry_find(id);
-
-	if (entry != NULL)
-		return entry;
-
-	/* Only handle sizes <= UINT_MAX internally. */
-	if (size64 > (u64)UINT_MAX)
-		return NULL;
-
-	size = size64;
-
-	root = get_root();
-
-	if (root == NULL)
-		return NULL;
-
-	/* Nothing can be added once it is locked down. */
-	if (root->locked)
-		return NULL;
+	struct imd *imd;
+	const struct imd_entry *e;
 
-	if (root->max_entries == root->num_entries)
-		return NULL;
+	imd = cbmem_get_imd();
 
-	aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE);
-	base = cbmem_base();
-	base -= aligned_size;
+	e = imd_entry_find_or_add(imd, id, size64);
 
-	return cbmem_entry_append(root, id, base, aligned_size);
+	return imd_to_cbmem(e);
 }
 
 void *cbmem_add(u32 id, u64 size)
@@ -349,25 +158,14 @@ void *cbmem_add(u32 id, u64 size)
 /* Retrieve a region provided a given id. */
 const struct cbmem_entry *cbmem_entry_find(u32 id)
 {
-	struct cbmem_root *root;
-	const struct cbmem_entry *entry;
-	unsigned int i;
-
-	root = get_root();
-
-	if (root == NULL)
-		return NULL;
+	struct imd *imd;
+	const struct imd_entry *e;
 
-	entry = NULL;
+	imd = cbmem_get_imd();
 
-	for (i = 0; i < root->num_entries; i++) {
-		if (root->entries[i].id == id) {
-			entry = &root->entries[i];
-			break;
-		}
-	}
+	e = imd_entry_find(imd, id);
 
-	return entry;
+	return imd_to_cbmem(e);
 }
 
 void *cbmem_find(u32 id)
@@ -386,43 +184,24 @@ void *cbmem_find(u32 id)
  * cannot be removed unless it was the last one added. */
 int cbmem_entry_remove(const struct cbmem_entry *entry)
 {
-	unsigned long entry_num;
-	struct cbmem_root *root;
-
-	root = get_root();
-
-	if (root == NULL)
-		return -1;
-
-	if (root->num_entries == 0)
-		return -1;
-
-	/* Nothing can be removed. */
-	if (root->locked)
-		return -1;
+	const struct imd_entry *e = cbmem_to_imd(entry);
 
-	entry_num = entry - &root->entries[0];
-
-	/* If the entry is the last one in the root it can be removed. */
-	if (entry_num == (root->num_entries - 1)) {
-		root->num_entries--;
-		return 0;
-	}
-
-	return -1;
+	return imd_entry_remove(cbmem_get_imd(), e);
 }
 
 u64 cbmem_entry_size(const struct cbmem_entry *entry)
 {
-	return entry->size;
+	const struct imd_entry *e = cbmem_to_imd(entry);
+
+	return imd_entry_size(cbmem_get_imd(), e);
 }
 
 void *cbmem_entry_start(const struct cbmem_entry *entry)
 {
-	uintptr_t addr = entry->start;
-	return (void *)addr;
-}
+	const struct imd_entry *e = cbmem_to_imd(entry);
+
+	return imd_entry_at(cbmem_get_imd(), e);
+}
 
 #if !defined(__PRE_RAM__)
 
@@ -458,30 +237,17 @@ BOOT_STATE_INIT_ENTRIES(cbmem_bscb) = {
 
 void cbmem_add_bootmem(void)
 {
-	uintptr_t base;
-	uintptr_t top;
+	void *base = NULL;
+	size_t size = 0;
 
-	base = cbmem_base();
-	top = get_top_aligned();
-	bootmem_add_range(base, top - base, LB_MEM_TABLE);
+	imd_region_used(cbmem_get_imd(), &base, &size);
+	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
 }
 
 void cbmem_list(void)
 {
-	unsigned int i;
-	struct cbmem_root *root;
-
-	root = get_root();
-
-	if (root == NULL)
-		return;
-
-	for (i = 0; i < root->num_entries; i++) {
-		struct cbmem_entry *entry;
-
-		entry = &root->entries[i];
-		cbmem_print_entry(i, entry->id, entry->start, entry->size);
-	}
+	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
 
+	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
 }
 #endif /* __PRE_RAM__ */