On 03/08/2015 06:27 PM, Michael S. Tsirkin wrote:
On Sun, Mar 08, 2015 at 01:16:14PM +0200, Marcel Apfelbaum wrote:
Save the IO/mem/bus-number ranges assigned to the extra root buses, so they can be removed from the root bus 0 range.
Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
---
 hw/i386/acpi-build.c | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 149 insertions(+)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index e7a1a36..f4d8816 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -728,6 +728,148 @@ static Aml *build_prt(void)
     return method;
 }
 
+typedef struct PciRangeEntry {
+    QLIST_ENTRY(PciRangeEntry) entry;
+    int64_t base;
+    int64_t limit;
+} PciRangeEntry;
+
+typedef QLIST_HEAD(PciRangeQ, PciRangeEntry) PciRangeQ;
+
+static void pci_range_insert(PciRangeQ *list, int64_t base, int64_t limit)
Don't start names with pci_ or Pci prefixes; those are for actual PCI code. Also, signed values for base/limit might be problematic, even though guests don't normally assign such values currently. I know it's a QMP bug/feature, but you don't have to use QMP here.
Sure, no problem, I'll take care of it.
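For illustration, the rename plus unsigned types could look something like the sketch below. The names (CrsRangeEntry, crs_range_*) are placeholders, not necessarily what the next version will use, and the function bodies would stay as in the patch with only the types changed; it still relies on QEMU's QLIST macros from "qemu/queue.h".

/* Sketch of the suggested rename, with unsigned base/limit. */
#include <stdint.h>
#include "qemu/queue.h"     /* QLIST_* macros */

typedef struct CrsRangeEntry {
    QLIST_ENTRY(CrsRangeEntry) entry;
    uint64_t base;
    uint64_t limit;
} CrsRangeEntry;

typedef QLIST_HEAD(CrsRangeQ, CrsRangeEntry) CrsRangeQ;

/* Same logic as pci_range_insert()/pci_range_list_free() below,
 * just without the pci_ prefix and without signed values. */
static void crs_range_insert(CrsRangeQ *list, uint64_t base, uint64_t limit);
static void crs_range_list_free(CrsRangeQ *list);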
+{
+    PciRangeEntry *entry, *next, *e;
+
+    if (!base) {
+        return;
+    }
+
+    e = g_malloc(sizeof(*entry));
+    e->base = base;
+    e->limit = limit;
+
+    if (QLIST_EMPTY(list)) {
+        QLIST_INSERT_HEAD(list, e, entry);
+    } else {
+        QLIST_FOREACH_SAFE(entry, list, entry, next) {
+            if (base < entry->base) {
+                QLIST_INSERT_BEFORE(entry, e, entry);
+                break;
+            } else if (!next) {
+                QLIST_INSERT_AFTER(entry, e, entry);
+                break;
+            }
+        }
+    }
+}
+
+static void pci_range_list_free(PciRangeQ *list)
+{
+    PciRangeEntry *entry, *next;
+
+    QLIST_FOREACH_SAFE(entry, list, entry, next) {
+        QLIST_REMOVE(entry, entry);
+        g_free(entry);
+    }
+}
Not very happy about the manual memory management here. Isn't there something you can do with glib instead?
The context is pretty simple:
 1. We create the ranges and add them to the lists as we go.
 2. We use the lists to create the AML IO ranges.
 3. We delete them once we finish.
All of that happens in the same context (the same chunk of code), so it seems pretty straightforward to me.
And how about using g_array_sort to sort things?
We can, but the code already inserts each range into the appropriate position, so why would using g_array_sort be better? If we find another reason to use g_array, we can leverage that method. I did think about g_array and ran into a problem with it, but I don't remember what it was now.
And regarding manual memory management: with a g_array we would still need to create and destroy the array.
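For reference, the GArray variant under discussion would look roughly like this. It is a sketch only: RangeTuple and collect_and_sort() are made-up names for illustration, and, as noted above, it still needs the explicit g_array_new()/g_array_free() pair.

#include <stdint.h>
#include <glib.h>

typedef struct RangeTuple {     /* hypothetical value type */
    uint64_t base;
    uint64_t limit;
} RangeTuple;

/* Comparator for g_array_sort(); a and b point at array elements. */
static gint range_compare(gconstpointer a, gconstpointer b)
{
    const RangeTuple *ra = a, *rb = b;

    return ra->base < rb->base ? -1 : ra->base > rb->base;
}

/* Collect ranges unsorted, sort once, then walk them to emit AML. */
void collect_and_sort(void)
{
    GArray *ranges = g_array_new(FALSE, FALSE, sizeof(RangeTuple));
    RangeTuple t = { .base = 0x1000, .limit = 0x1fff };

    g_array_append_val(ranges, t);
    /* ... append the remaining ranges ... */
    g_array_sort(ranges, range_compare);
    /* ... g_array_index(ranges, RangeTuple, i) to read them back ... */
    g_array_free(ranges, TRUE);
}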
+
+static Aml *build_crs(PcPciInfo *pci, PciInfo *bus_info,
+                      PciRangeQ *io_ranges, PciRangeQ *mem_ranges)
+{
+    PciDeviceInfoList *dev_list;
+    PciMemoryRange range;
+    uint8_t max_bus;
+    Aml *crs;
+
+    crs = aml_resource_template();
+    max_bus = bus_info->bus;
+
+    for (dev_list = bus_info->devices; dev_list; dev_list = dev_list->next) {
+        PciMemoryRegionList *region;
+
+        for (region = dev_list->value->regions; region; region = region->next) {
+            range.base = region->value->address;
+            range.limit = region->value->address + region->value->size - 1;
+
+            if (!strcmp(region->value->type, "io")) {
+                aml_append(crs,
+                    aml_word_io(aml_min_fixed, aml_max_fixed,
+                                aml_pos_decode, aml_entire_range,
+                                0,
+                                range.base,
+                                range.limit,
+                                0,
+                                range.limit - range.base + 1));
+                pci_range_insert(io_ranges, range.base, range.limit);
+            } else { /* "memory" */
+                aml_append(crs,
+                    aml_dword_memory(aml_pos_decode, aml_min_fixed,
+                                     aml_max_fixed, aml_non_cacheable,
+                                     aml_ReadWrite,
+                                     0,
+                                     range.base,
+                                     range.limit,
+                                     0,
+                                     range.limit - range.base + 1));
+                pci_range_insert(mem_ranges, range.base, range.limit);
+            }
+        }
+
+        if (dev_list->value->has_pci_bridge) {
+            PciBridgeInfo *bridge_info = dev_list->value->pci_bridge;
+
+            if (bridge_info->bus.subordinate > max_bus) {
What's this doing?
It keeps track of the maximum bus number for the PIIX host-bridge bus number range (see [1] below). The PCI root bus 0 range is [0, <minimum bus number used by the other PCI root buses>). Maybe the variable name is not so good; I am open to suggestions: min_extra_root_bus_nr?
+                max_bus = bridge_info->bus.subordinate;
+            }
+
+            range = *bridge_info->bus.io_range;
+            aml_append(crs,
+                aml_word_io(aml_min_fixed, aml_max_fixed,
+                            aml_pos_decode, aml_entire_range,
+                            0,
+                            range.base,
+                            range.limit,
+                            0,
+                            range.limit - range.base + 1));
+            pci_range_insert(io_ranges, range.base, range.limit);
+
+            range = *bridge_info->bus.memory_range;
+            aml_append(crs,
+                aml_dword_memory(aml_pos_decode, aml_min_fixed,
+                                 aml_max_fixed, aml_non_cacheable,
+                                 aml_ReadWrite,
+                                 0,
+                                 range.base,
+                                 range.limit,
+                                 0,
+                                 range.limit - range.base + 1));
+            pci_range_insert(mem_ranges, range.base, range.limit);
+
+            range = *bridge_info->bus.prefetchable_range;
+            aml_append(crs,
+                aml_dword_memory(aml_pos_decode, aml_min_fixed,
+                                 aml_max_fixed, aml_non_cacheable,
+                                 aml_ReadWrite,
+                                 0,
+                                 range.base,
+                                 range.limit,
+                                 0,
+                                 range.limit - range.base + 1));
+            pci_range_insert(mem_ranges, range.base, range.limit);
+        }
+    }
+
+    aml_append(crs,
+        aml_word_bus_number(aml_min_fixed, aml_max_fixed, aml_pos_decode,
+                            0,
+                            bus_info->bus,
+                            max_bus,
+                            0,
+                            max_bus - bus_info->bus + 1));
[1] The bus number range for the PIIX host bridge (bus 0).
Thanks, Marcel
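To make [1] concrete, here is a worked example with made-up numbers (not taken from the patch or a real guest): say root bus 0 has one bridge whose subordinate bus is 2, and an extra root bus was assigned bus number 5.

    /* With those hypothetical numbers the loop leaves max_bus == 2,
     * so the aml_word_bus_number() call above becomes: */
    aml_append(crs,
        aml_word_bus_number(aml_min_fixed, aml_max_fixed, aml_pos_decode,
                            0,    /* address granularity */
                            0,    /* range minimum: bus_info->bus */
                            2,    /* range maximum: max_bus */
                            0,    /* translation offset */
                            3));  /* length: max_bus - bus_info->bus + 1 */
    /* Bus numbers 3..255 stay outside bus 0's _CRS, so the extra root
     * bus (number 5 here) can describe its own range there. */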
+
+    return crs;
+}
+
 static void
 build_ssdt(GArray *table_data, GArray *linker,
            AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
@@ -737,6 +879,8 @@ build_ssdt(GArray *table_data, GArray *linker,
     uint32_t nr_mem = machine->ram_slots;
     unsigned acpi_cpus = guest_info->apic_id_limit;
     Aml *ssdt, *sb_scope, *scope, *pkg, *dev, *method, *crs, *field, *ifctx;
+    PciRangeQ io_ranges = QLIST_HEAD_INITIALIZER(io_ranges);
+    PciRangeQ mem_ranges = QLIST_HEAD_INITIALIZER(mem_ranges);
     int i;
 
     ssdt = init_aml_allocator();
@@ -773,9 +917,14 @@ build_ssdt(GArray *table_data, GArray *linker,
             aml_append(dev, aml_name_decl("_BBN", aml_int((uint8_t)bus_info->bus)));
             aml_append(dev, build_prt());
+            crs = build_crs(pci, bus_info, &io_ranges, &mem_ranges);
+            aml_append(dev, aml_name_decl("_CRS", crs));
             aml_append(scope, dev);
             aml_append(ssdt, scope);
         }
     }
 
+    pci_range_list_free(&io_ranges);
+    pci_range_list_free(&mem_ranges);
+
     qapi_free_PciInfoList(info_list);
 }
--
2.1.0