Attention is currently required from: Julius Werner. Raul Rangel has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/56049 )
Change subject: cbfs: Introduce and implement cbfs_load_async ......................................................................
cbfs: Introduce and implement cbfs_load_async
This CL adds support for the Futures API to CBFS. This allows loading a file from CBFS asynchronously. It currently only supports uncompressed files. More APIs will be added as I start to load stages, option ROMs, etc. asynchronously.
BUG=b:179699789 TEST=Boot guybrush and see payload read/decompress drop by 23 ms.
Signed-off-by: Raul E Rangel rrangel@chromium.org Change-Id: Idf4cee8a7094a9470aff997e4b06e823cb4811c0 --- M src/include/cbfs.h M src/lib/cbfs.c 2 files changed, 184 insertions(+), 0 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/49/56049/1
diff --git a/src/include/cbfs.h b/src/include/cbfs.h index 9ed5233..885e4d3 100644 --- a/src/include/cbfs.h +++ b/src/include/cbfs.h @@ -5,6 +5,7 @@
#include <cbmem.h> #include <commonlib/bsd/cbfs_mdata.h> +#include <commonlib/bsd/future.h> #include <commonlib/cbfs.h> #include <commonlib/mem_pool.h> #include <program_loading.h> @@ -58,6 +59,9 @@ * is CBFS_TYPE_QUERY, it will be replaced with the actual CBFS type of the found file. If * it is anything else, the type will be compared with the actually found type, and the * operation will fail if they don't match. + * + * There are also async flavors of the functions. These will read from the underlying data store + * asynchronously. */
/* @@ -97,6 +101,16 @@ static inline void *cbfs_ro_type_cbmem_alloc(const char *name, uint32_t cbmem_id, size_t *size_out, enum cbfs_type *type);
+struct cbfs_load_async_context { + struct future future; + void *buffer; + size_t size; + enum cbfs_type type; +}; + +cb_err_t cbfs_load_async(const char *name, struct cbfs_load_async_context *context, void *buf, + size_t size); + /* Removes a previously allocated CBFS mapping. Should try to unmap mappings in strict LIFO order where possible, since mapping backends often don't support more complicated cases. */ void cbfs_unmap(void *mapping); diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c index 32ebfaf..d653c6d 100644 --- a/src/lib/cbfs.c +++ b/src/lib/cbfs.c @@ -304,6 +304,176 @@ return cbfs_map(name, NULL); }
+struct cbfs_load_async_internal_context { + struct cbfs_load_async_context *public_context; + struct rdev_readat_async_context rdev_context; + uint8_t *buffer; + size_t buffer_size; + enum cbfs_type type; + enum cbfs_compression compression; + size_t compressed_size; + size_t decompressed_size; + const struct vb2_hash *file_hash; +}; + +/* + * We avoid malloc and statically allocate a few contexts. Since the list is currently + * so small we just iterate through the list. If we ever need to dramatically increase + * the number of entries, we should maybe use a better data structure and algorithms. + */ +static struct cbfs_load_async_internal_context async_contexts[4]; + +static struct cbfs_load_async_internal_context *get_free_context(void) +{ + struct cbfs_load_async_internal_context *context; + for (unsigned int i = 0; i < ARRAY_SIZE(async_contexts); ++i) { + context = &async_contexts[i]; + if (!context->public_context) { + memset(context, 0, sizeof(*context)); + return context; + } + } + + die("%s: All async_contexts are in use\n", __func__); +} + +static void free_context(struct cbfs_load_async_internal_context *context) +{ + context->public_context = NULL; +} +static struct cbfs_load_async_internal_context *find_context(struct future *future) +{ + struct cbfs_load_async_internal_context *context; + for (unsigned int i = 0; i < ARRAY_SIZE(async_contexts); ++i) { + context = &async_contexts[i]; + if (!context->public_context) + continue; + + if (&context->public_context->future == future) + return context; + } + + die("%s: Failed to locate context for future: %p\n", future); +} + +static enum future_state cbfs_load_poll_future(struct future *future, bool busy_loop) +{ + enum future_state state; + struct cbfs_load_async_internal_context *context = find_context(future); + struct future *rdev_future = &context->rdev_context.future; + + /* Check the rdev future, if it's not done, then we have nothing to do */ + state = rdev_future->poll(rdev_future, busy_loop); + 
if (state != FUTURE_DONE) + return state; + + if (rdev_future->error != CB_SUCCESS) { + ERROR("async readat failed\n"); + complete_future(future, rdev_future->error); + free_context(context); + return FUTURE_DONE; + } + + /* + * We only want to perform verification and/or decompression when + * the buffer is required. This way we can attribute the CPU time to + * the correct timestamps. + */ + if (!busy_loop) + return FUTURE_PENDING; + + if (context->file_hash) { + if (cbfs_file_hash_mismatch(context->buffer, context->compressed_size, + context->file_hash)) { + complete_future(future, CB_CBFS_HASH_MISMATCH); + free_context(context); + return FUTURE_DONE; + } + } + + // TODO: Add LZ4 decompression + assert(context->compression == CBFS_COMPRESS_NONE); + + /* Complete the future and populate the output values */ + context->public_context->buffer = context->buffer; + context->public_context->size = context->decompressed_size; + context->public_context->type = context->type; + + complete_future(future, CB_SUCCESS); + free_context(context); + + return FUTURE_DONE; +} + +cb_err_t cbfs_load_async(const char *name, struct cbfs_load_async_context *public_context, + void *buffer, size_t buffer_size) +{ + struct region_device rdev; + union cbfs_mdata mdata; + struct cbfs_load_async_internal_context *context; + uint32_t compression = CBFS_COMPRESS_NONE; + size_t decompressed_size; + const struct cbfs_file_attr_compression *cattr; + cb_err_t err; + + DEBUG("%s(name='%s', buffer=%p, buffer_size=%#zx)\n", __func__, name, buffer, + buffer_size); + + assert(public_context); + + public_context->future.poll = NULL; + + if (cbfs_boot_lookup(name, false, &mdata, &rdev)) { + ERROR("file was not found.\n"); + return CB_ERR_ARG; + } + + if (buffer_size < region_device_sz(&rdev)) { + ERROR("target buffer is not large enough.\n"); + return CB_ERR_ARG; + } + + cattr = cbfs_find_attr(&mdata, CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr)); + if (cattr) { + compression = 
be32toh(cattr->compression); + decompressed_size = be32toh(cattr->decompressed_size); + } else { + decompressed_size = region_device_sz(&rdev); + } + + /* TODO: Add support for LZ4. LZMA is tricky since it can't be decompressed in place */ + if (compression != CBFS_COMPRESS_NONE) { + LOG("compressed payloads are not currently supported.\n"); + return CB_ERR; + } + + context = get_free_context(); + context->public_context = public_context; + context->public_context->future.poll = cbfs_load_poll_future; + context->buffer = buffer; + context->buffer_size = buffer_size; + context->type = be32toh(mdata.h.type); + context->compression = compression; + context->compressed_size = region_device_sz(&rdev); + context->decompressed_size = decompressed_size; + + if (CONFIG(CBFS_VERIFICATION)) + context->file_hash = cbfs_file_hash(&mdata); + + DEBUG("Reading %zu bytes to %p asynchronously\n", context->compressed_size, + context->buffer); + + err = rdev_readat_async(&rdev, &context->rdev_context, buffer, 0, + context->compressed_size); + if (err) { + ERROR("Cannot perform async readat\n"); + public_context->future.poll = NULL; + free_context(context); + } + + return err; +} + void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg, size_t *size_out, bool force_ro, enum cbfs_type *type) {