Martin L Roth has submitted this change. ( https://review.coreboot.org/c/coreboot/+/41957 )
Change subject: allocator_v4: Introduce RESOURCE_ALLOCATION_TOP_DOWN
......................................................................
allocator_v4: Introduce RESOURCE_ALLOCATION_TOP_DOWN
Add an option to resource allocator v4 that restores the top-down
allocation approach at the domain level.
This makes it easier to handle 64-bit resources natively. With the
top-down approach, resources that can be placed either above or below
4G would be placed above, to save precious space below the 4G boundary.
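For illustration (all values hypothetical): given one free range
[0xc0000000, 0x180000000) and a 256MiB resource with 1MiB alignment
that tolerates 64-bit addresses, top-down stealing yields

    ALIGN_DOWN(0x180000000 - 0x10000000, 0x100000) = 0x170000000

i.e. a base above the 4G boundary (0x100000000), while bottom-up
allocation would return ALIGN_UP(0xc0000000, 0x100000) = 0xc0000000,
consuming space below 4G.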
Change-Id: Iaf463d3e6b37d52e46761d8e210034fded58a8a4
Signed-off-by: Nico Huber <nico.h@gmx.de>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/41957
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Reviewed-by: Subrata Banik <subratabanik@google.com>
---
M src/device/Kconfig
M src/device/resource_allocator_v4.c
M src/include/memrange.h
M src/lib/memrange.c
M tests/lib/memrange-test.c
5 files changed, 95 insertions(+), 34 deletions(-)
Approvals:
  build bot (Jenkins): Verified
  Lean Sheng Tan: Looks good to me, but someone else must approve
  Tim Wawrzynczak: Looks good to me, approved
  Subrata Banik: Looks good to me, approved
diff --git a/src/device/Kconfig b/src/device/Kconfig
index 6f4a24e..af9beb3 100644
--- a/src/device/Kconfig
+++ b/src/device/Kconfig
@@ -933,6 +933,11 @@
 	  ranges for allocating resources. This allows allocation of
 	  resources above 4G boundary as well.
 
+config RESOURCE_ALLOCATION_TOP_DOWN
+	bool "Allocate resources from top down"
+	default y
+	depends on RESOURCE_ALLOCATOR_V4
+
 config XHCI_UTILS
 	def_bool n
 	help
diff --git a/src/device/resource_allocator_v4.c b/src/device/resource_allocator_v4.c
index 2b62fc8..222f1e9 100644
--- a/src/device/resource_allocator_v4.c
+++ b/src/device/resource_allocator_v4.c
@@ -372,6 +372,9 @@
 static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
 				     unsigned long type_mask, unsigned long type_match)
 {
+	const bool allocate_top_down =
+		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
+		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
 	struct resource *resource = NULL;
 	const struct device *dev;
@@ -381,7 +384,7 @@
 			continue;
 
 		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
-				    type_match, &resource->base) == false) {
+				    type_match, &resource->base, allocate_top_down) == false) {
 			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
 			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
 			       dev_path(dev), resource->index,
diff --git a/src/include/memrange.h b/src/include/memrange.h
index a8a8de9..f7c5bfc 100644
--- a/src/include/memrange.h
+++ b/src/include/memrange.h
@@ -161,15 +161,17 @@
 				      const struct range_entry *r);
 
 /* Steals memory from the available list in given ranges as per the constraints:
- * limit = Upper bound for the memory range to steal (Inclusive).
- * size = Requested size for the stolen memory.
- * align = Required alignment(log 2) for the starting address of the stolen memory.
- * tag = Use a range that matches the given tag.
+ * limit    = Upper bound for the memory range to steal (Inclusive).
+ * size     = Requested size for the stolen memory.
+ * align    = Required alignment(log 2) for the starting address of the stolen memory.
+ * tag      = Use a range that matches the given tag.
+ * from_top = Steal the highest possible range.
  *
  * If the constraints can be satisfied, this function creates a hole in the memrange,
  * writes the base address of that hole to stolen_base and returns true. Otherwise it returns
  * false. */
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-		     unsigned char align, unsigned long tag, resource_t *stolen_base);
+		     unsigned char align, unsigned long tag, resource_t *stolen_base,
+		     bool from_top);
 
 #endif /* MEMRANGE_H_ */
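As a usage sketch of the extended interface (the populated struct
memranges, the limit, tag and sizes here are illustrative, not part of
this change):

	/* Sketch: steal a 64KiB hole, 4KiB-aligned (align is log2),
	 * from the top of the available ranges tagged IORESOURCE_MEM,
	 * assuming 'ranges' was filled elsewhere. */
	resource_t base;
	if (memranges_steal(&ranges, 0xffffffff, 64 * KiB, 12,
			    IORESOURCE_MEM, &base, /*from_top=*/true))
		printk(BIOS_DEBUG, "stole hole at 0x%llx\n", base);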
diff --git a/src/lib/memrange.c b/src/lib/memrange.c
index 39f502c..b68b86e 100644
--- a/src/lib/memrange.c
+++ b/src/lib/memrange.c
@@ -378,11 +378,11 @@
 
 /* Find a range entry that satisfies the given constraints to fit a hole that matches the
  * required alignment, is big enough, does not exceed the limit and has a matching tag. */
-static const struct range_entry *memranges_find_entry(struct memranges *ranges,
-						      resource_t limit, resource_t size,
-						      unsigned char align, unsigned long tag)
+static const struct range_entry *
+memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
+		     unsigned char align, unsigned long tag, bool last)
 {
-	const struct range_entry *r;
+	const struct range_entry *r, *last_entry = NULL;
 	resource_t base, end;
 
 	if (size == 0)
@@ -407,25 +407,35 @@
 		if (end > limit)
 			break;
 
-		return r;
+		if (!last)
+			return r;
+
+		last_entry = r;
 	}
 
-	return NULL;
+	return last_entry;
 }
 
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-		     unsigned char align, unsigned long tag, resource_t *stolen_base)
+		     unsigned char align, unsigned long tag, resource_t *stolen_base,
+		     bool from_top)
 {
-	resource_t base;
-	const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);
+	const struct range_entry *r;
 
+	r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
 	if (r == NULL)
 		return false;
 
-	base = ALIGN_UP(r->begin, POWER_OF_2(align));
-
-	memranges_create_hole(ranges, base, size);
-	*stolen_base = base;
+	if (from_top) {
+		/* Ensure we're within the range, even aligned down.
+		   Proof is simple: If ALIGN_UP(r->begin) would be
+		   higher, the stolen range wouldn't fit. */
+		assert(r->begin <= ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align)));
+		*stolen_base = ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align));
+	} else {
+		*stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
+	}
+	memranges_create_hole(ranges, *stolen_base, size);
 
 	return true;
 }
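A worked instance of the assert above (values hypothetical): for a free
range [0x1000, 0x9000) with size = 0x3000 and align = 12 (4KiB),
memranges_find_entry() only returned r because the hole fits bottom-up,

	ALIGN_UP(0x1000, 0x1000) + 0x3000 = 0x4000 <= 0x9000

so the down-aligned top-down base necessarily stays inside the range:

	ALIGN_DOWN(0x9000 - 0x3000, 0x1000) = 0x6000 >= 0x1000 = r->begin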
diff --git a/tests/lib/memrange-test.c b/tests/lib/memrange-test.c
index 557ade4..9871484 100644
--- a/tests/lib/memrange-test.c
+++ b/tests/lib/memrange-test.c
@@ -457,8 +457,9 @@
 }
 
 /*
- * This test verifies memranges_steal() function. Simple check is done by attempt so steal some
- * memory from region with READONLY_TAG.
+ * This test verifies memranges_steal() function. Simple check is done by attempt
+ * to steal some memory from the top of region with CACHEABLE_TAG and some from
+ * the bottom of region with READONLY_TAG.
  *
  * Example memory ranges (res_mock1) for test_memrange_steal.
  * Space marked with (/) is stolen during the test.
@@ -466,8 +467,8 @@
  * +--------CACHEABLE_TAG--------+ <-0xE000
  * |                             |
  * |                             |
- * |                             |
- * +-----------------------------+ <-0x100000
+ * |/////////////////////////////| <-stolen_base
+ * +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
  *
  *
@@ -501,13 +502,27 @@
 	status = memranges_steal(&test_memrange,
 				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
-				 stolen_range_size, 12, READONLY_TAG, &stolen);
+				 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
+	assert_true(status);
+	assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
+			res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
+	status = memranges_steal(&test_memrange,
+				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
+				 stolen_range_size, 12, READONLY_TAG, &stolen, false);
 	assert_true(status);
 	assert_in_range(stolen, res_mock[READONLY_TAG].base,
 			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);
 
 	memranges_each_entry(ptr, &test_memrange) {
+		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
+			assert_int_equal(range_entry_end(ptr),
+					 ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
+							     + res_mock[CACHEABLE_TAG].size,
+							     MEMRANGE_ALIGN)
+						    - stolen_range_size,
+						    MEMRANGE_ALIGN));
+		}
 		if (range_entry_tag(ptr) == READONLY_TAG) {
 			assert_int_equal(range_entry_base(ptr),
 					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
@@ -518,20 +533,23 @@
 	assert_int_equal(count, 3);
 	count = 0;
 
-	/* Check if inserting range in previously stolen area will merge it. */
+	/* Check if inserting ranges in previously stolen areas will merge them. */
+	memranges_insert(&test_memrange,
+			 res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
+				 - stolen_range_size - 0x12,
+			 stolen_range_size, CACHEABLE_TAG);
 	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC,
 			 stolen_range_size, READONLY_TAG);
 
 	memranges_each_entry(ptr, &test_memrange) {
-		if (range_entry_tag(ptr) == READONLY_TAG) {
-			assert_int_equal(
-				range_entry_base(ptr),
-				ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN));
-			assert_int_equal(
-				range_entry_end(ptr),
-				ALIGN_UP(res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size,
-					 MEMRANGE_ALIGN));
-		}
+		const unsigned long tag = range_entry_tag(ptr);
+		assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
+		assert_int_equal(
+			range_entry_base(ptr),
+			ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
+		assert_int_equal(
+			range_entry_end(ptr),
+			ALIGN_UP(res_mock[tag].base + res_mock[tag].size, MEMRANGE_ALIGN));
 		count++;
 	}
 	assert_int_equal(count, 3);
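Boards that depend on the previous bottom-up placement can opt out of
the new default; an illustrative defconfig fragment (board-agnostic
sketch, not part of this change):

	# Keep resource allocator v4, but restore bottom-up allocation
	# at the domain level.
	CONFIG_RESOURCE_ALLOCATOR_V4=y
	# CONFIG_RESOURCE_ALLOCATION_TOP_DOWN is not set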