In pci_bios_map_regions() we try to reserve address space for all entries of the root bus regions. If pci_bios_init_root_regions() fails - e.g. not enough space - we create two new pci_regions, r64pref and r64mem, and migrate all 64bit-capable entries to them. The migration itself is simple: delete the entry from one region's list and add it to the other. Then we try pci_bios_init_root_regions() again.
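To illustrate (this sketch is not part of the patch and uses hypothetical demo_* types rather than the real pci_region/pci_region_entry structs), the migration step amounts to unlinking an entry from one region's list and pushing it onto another:

#include <stdbool.h>

/* Hypothetical stand-in types, for illustration only. */
struct demo_entry {
    bool is64bit;
    struct demo_entry *next;
};

struct demo_region {
    struct demo_entry *list;    /* singly linked list of entries */
};

/* Walk 'from'; unlink every 64bit-capable entry and push it onto 'to'. */
static void demo_move_64bit_entries(struct demo_region *to,
                                    struct demo_region *from)
{
    struct demo_entry **pprev = &from->list;
    while (*pprev) {
        struct demo_entry *e = *pprev;
        if (e->is64bit) {
            *pprev = e->next;       /* delete from the old list */
            e->next = to->list;     /* add to the new list */
            to->list = e;
        } else {
            pprev = &e->next;
        }
    }
}

The real pci_region_move_64bit_entries() below does the same via region_entry_del()/region_entry_add() and also updates the entry's parent_region pointer.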
If it passes, we map the entries of each region:
1. Calculate the base address of the entry and advance the pci_region base address.
2. Program the PCI BAR or bridge region. If the entry belongs to a PCI-to-PCI bridge and provides a pci_region for downstream devices, set the base address of the region the entry provides.
3. Delete the entry.
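Again purely as an illustration (hypothetical demo_* names, not the SeaBIOS API), step 1 can be sketched as walking the entries from the largest power-of-two size downwards, so every assigned base stays naturally aligned while the region base advances:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in type, for illustration only. */
struct demo_map_entry {
    uint64_t size;      /* power-of-two BAR/window size */
    uint64_t base;      /* assigned below */
};

/* Assign bases largest-first; each assignment advances the region base. */
static void demo_map_entries(uint64_t region_base, uint64_t max_size,
                             struct demo_map_entry *entries, int count)
{
    uint64_t size;
    int i;
    for (size = max_size; size > 0; size >>= 1)
        for (i = 0; i < count; i++)
            if (entries[i].size == size) {
                entries[i].base = region_base;  /* step 1: set entry base */
                region_base += size;            /* ...and advance region base */
                /* step 2 (programming the BAR or bridge window) and
                 * step 3 (deleting the entry) would go here */
            }
}

int main(void)
{
    struct demo_map_entry e[] = { {0x1000, 0}, {0x4000, 0}, {0x2000, 0} };
    int i;
    demo_map_entries(0xe0000000, 0x4000, e, 3);
    for (i = 0; i < 3; i++)
        printf("size=0x%llx -> base=0x%llx\n",
               (unsigned long long)e[i].size,
               (unsigned long long)e[i].base);
    return 0;
}

In the patch itself, pci_region_map_entries() implements this loop over the region's entry list and pci_region_map_one_entry() performs the actual register writes.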
Signed-off-by: Alexey Korolev <alexey.korolev@endace.com>
---
 src/config.h  |    2 +
 src/pciinit.c |  123 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 124 insertions(+), 1 deletions(-)
diff --git a/src/config.h b/src/config.h
index b0187a4..bbacae7 100644
--- a/src/config.h
+++ b/src/config.h
@@ -47,6 +47,8 @@
 #define BUILD_PCIMEM_START       0xe0000000
 #define BUILD_PCIMEM_END         0xfec00000    /* IOAPIC is mapped at */
+#define BUILD_PCIMEM64_START     0x8000000000ULL
+#define BUILD_PCIMEM64_END       0x10000000000ULL
 
 #define BUILD_IOAPIC_ADDR        0xfec00000
 #define BUILD_HPET_ADDRESS       0xfed00000
 
diff --git a/src/pciinit.c b/src/pciinit.c
index 03ece34..0fba130 100644
--- a/src/pciinit.c
+++ b/src/pciinit.c
@@ -496,6 +496,126 @@ static int pci_bios_fill_regions(struct pci_region *regions)
     return 0;
 }
 
+/****************************************************************
+ * Map pci region entries
+ ****************************************************************/
+
+#define ROOT_BASE(top, sum, max) ALIGN_DOWN((top)-(sum), (max) ?: 1)
+
+// Setup region bases (given the regions' size and alignment)
+static int pci_bios_init_root_regions(struct pci_region *regions)
+{
+    struct pci_region *r_end, *r_start;
+    regions[PCI_REGION_TYPE_IO].base = 0xc000;
+
+    r_end = &regions[PCI_REGION_TYPE_PREFMEM];
+    r_start = &regions[PCI_REGION_TYPE_MEM];
+    if (pci_region_sum(r_end) > pci_region_sum(r_start)) {
+        // Swap regions so larger area is more likely to align well.
+        r_end = r_start;
+        r_start = &regions[PCI_REGION_TYPE_PREFMEM];
+    }
+    // Out of space
+    if (pci_region_sum(r_end) + pci_region_sum(r_start) > BUILD_PCIMEM_END)
+        return -1;
+
+    r_end->base = ROOT_BASE(BUILD_PCIMEM_END, pci_region_sum(r_end),
+                            pci_region_max_size(r_end));
+    r_start->base = ROOT_BASE(r_end->base, pci_region_sum(r_start),
+                              pci_region_max_size(r_start));
+    if (r_start->base < BUILD_PCIMEM_START)
+        // Memory range requested is larger than available...
+        return -1;
+    return 0;
+}
+
+static void
+pci_region_move_64bit_entries(struct pci_region *to, struct pci_region *from)
+{
+    struct pci_region_entry *entry, *next;
+    foreach_region_entry_safe(from, next, entry) {
+        if (entry->is64bit) {
+            region_entry_del(entry);
+            region_entry_add(to, entry);
+            entry->parent_region = to;
+        }
+    }
+}
+
+static void pci_region_map_one_entry(struct pci_region_entry *entry)
+{
+    if (!entry->this_region) {
+        pci_set_io_region_addr(entry->dev, entry->bar, entry->base);
+        if (entry->is64bit)
+            pci_set_io_region_addr(entry->dev, entry->bar + 1,
+                                   entry->base >> 32);
+        return;
+    }
+
+    entry->this_region->base = entry->base;
+    u16 bdf = entry->dev->bdf;
+    u64 base = entry->base;
+    u64 limit = entry->base + entry->size - 1;
+    if (entry->type == PCI_REGION_TYPE_IO) {
+        pci_config_writeb(bdf, PCI_IO_BASE, base >> 8);
+        pci_config_writew(bdf, PCI_IO_BASE_UPPER16, 0);
+        pci_config_writeb(bdf, PCI_IO_LIMIT, limit >> 8);
+        pci_config_writew(bdf, PCI_IO_LIMIT_UPPER16, 0);
+    }
+    if (entry->type == PCI_REGION_TYPE_MEM) {
+        pci_config_writew(bdf, PCI_MEMORY_BASE, base >> 16);
+        pci_config_writew(bdf, PCI_MEMORY_LIMIT, limit >> 16);
+    }
+    if (entry->type == PCI_REGION_TYPE_PREFMEM) {
+        pci_config_writew(bdf, PCI_PREF_MEMORY_BASE, base >> 16);
+        pci_config_writew(bdf, PCI_PREF_MEMORY_LIMIT, limit >> 16);
+        pci_config_writel(bdf, PCI_PREF_BASE_UPPER32, base >> 32);
+        pci_config_writel(bdf, PCI_PREF_LIMIT_UPPER32, limit >> 32);
+    }
+    return;
+}
+
+static void pci_region_map_entries(struct pci_region *r)
+{
+    struct pci_region_entry *entry, *next;
+    u64 size, max_size = pci_region_max_size(r);
+
+    for (size = max_size; size > 0; size >>= 1) {
+        foreach_region_entry_safe(r, next, entry) {
+            if (size == entry->size) {
+                entry->base = r->base;
+                r->base += size;
+                dump_entry(entry);
+                pci_region_map_one_entry(entry);
+                region_entry_del(entry);
+                free(entry);
+            }
+        }
+    }
+}
+
+static int pci_bios_map_regions(struct pci_region *regions)
+{
+    if (pci_bios_init_root_regions(regions)) {
+        struct pci_region r64pref, r64mem;
+        memset(&r64pref, 0, sizeof(struct pci_region));
+        memset(&r64mem, 0, sizeof(struct pci_region));
+        pci_region_move_64bit_entries(&r64pref,
+                                      &regions[PCI_REGION_TYPE_PREFMEM]);
+        pci_region_move_64bit_entries(&r64mem,
+                                      &regions[PCI_REGION_TYPE_MEM]);
+
+        if (pci_bios_init_root_regions(regions)) {
+            panic("PCI: out of address space\n");
+        }
+        r64pref.base = BUILD_PCIMEM64_START;
+        r64mem.base = ALIGN(r64pref.base + pci_region_sum(&r64pref),
+                            pci_region_max_size(&r64mem));
+        pci_region_map_entries(&r64pref);
+        pci_region_map_entries(&r64mem);
+    }
+
+    int i;
+    for (i = 0; i < (MaxPCIBus + 1) * PCI_REGION_TYPE_COUNT; i++)
+        pci_region_map_entries(&regions[i]);
+
+    return 0;
+}
+
 static void pci_bios_bus_reserve(struct pci_bus *bus, int type, u32 size)
 {
@@ -744,9 +864,10 @@ pci_setup(void)
     }
 
     dprintf(1, "=== PCI new allocation pass #2 ===\n");
+    pci_bios_map_regions(regions);
     pci_bios_map_devices(busses);
 
     pci_bios_init_devices();
 
-    free(busses);
+    free(regions);
 }