From: David Woodhouse David.Woodhouse@intel.com
Signed-off-by: David Woodhouse David.Woodhouse@intel.com --- src/byteorder.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/byteorder.h b/src/byteorder.h index 5a8a64a..7362aeb 100644 --- a/src/byteorder.h +++ b/src/byteorder.h @@ -43,7 +43,7 @@ static inline u16 le16_to_cpu(u16 x) { static inline u32 le32_to_cpu(u32 x) { return x; } -static inline u32 le64_to_cpu(u64 x) { +static inline u64 le64_to_cpu(u64 x) { return x; }
@@ -62,7 +62,7 @@ static inline u16 be16_to_cpu(u16 x) { static inline u32 be32_to_cpu(u32 x) { return swab32(x); } -static inline u32 be64_to_cpu(u64 x) { +static inline u64 be64_to_cpu(u64 x) { return swab64(x); }
From: David Woodhouse David.Woodhouse@intel.com
I'm about to make it do more than just the pmtimer...
Signed-off-by: David Woodhouse David.Woodhouse@intel.com --- src/acpi.c | 10 ++++------ src/acpi.h | 2 +- src/coreboot.c | 4 ++-- src/csm.c | 4 ++-- src/xen.c | 4 ++-- 5 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/src/acpi.c b/src/acpi.c index c7177c3..36bd39a 100644 --- a/src/acpi.c +++ b/src/acpi.c @@ -925,15 +925,13 @@ find_resume_vector(void) }
void -find_pmtimer(void) +find_acpi_features(void) { struct fadt_descriptor_rev1 *fadt = find_fadt(); if (!fadt) return; - u32 pm_tmr = fadt->pm_tmr_blk; + u32 pm_tmr = le32_to_cpu(fadt->pm_tmr_blk); dprintf(4, "pm_tmr_blk=%x\n", pm_tmr); - if (!pm_tmr) - return; - - pmtimer_setup(pm_tmr, 3579); + if (pm_tmr) + pmtimer_setup(pm_tmr, 3579); } diff --git a/src/acpi.h b/src/acpi.h index e52470e..b23717a 100644 --- a/src/acpi.h +++ b/src/acpi.h @@ -5,7 +5,7 @@
void acpi_setup(void); u32 find_resume_vector(void); -void find_pmtimer(void); +void find_acpi_features(void);
#define RSDP_SIGNATURE 0x2052545020445352LL // "RSD PTR "
diff --git a/src/coreboot.c b/src/coreboot.c index f0484e1..c9ad2a8 100644 --- a/src/coreboot.c +++ b/src/coreboot.c @@ -12,7 +12,7 @@ #include "boot.h" // boot_add_cbfs #include "disk.h" // MAXDESCSIZE #include "config.h" // CONFIG_* -#include "acpi.h" // find_pmtimer +#include "acpi.h" // find_acpi_features #include "pci.h" // pci_probe_devices
@@ -214,7 +214,7 @@ coreboot_platform_setup(void) scan_tables(m->start, m->size); }
- find_pmtimer(); + find_acpi_features(); }
diff --git a/src/csm.c b/src/csm.c index 68f8830..4336e16 100644 --- a/src/csm.c +++ b/src/csm.c @@ -146,12 +146,12 @@ handle_csm_0002(struct bregs *regs) dprintf(3, "CSM PIRQ table at %p\n", PirAddr); }
- // For find_resume_vector()... and find_pmtimer() + // For find_resume_vector()... and find_acpi_features() if (csm_rsdp.signature == RSDP_SIGNATURE) { RsdpAddr = &csm_rsdp; dprintf(3, "CSM ACPI RSDP at %p\n", RsdpAddr);
- find_pmtimer(); + find_acpi_features(); }
// SMBIOS table needs to be copied into the f-seg diff --git a/src/xen.c b/src/xen.c index db542c3..5dfee9e 100644 --- a/src/xen.c +++ b/src/xen.c @@ -10,7 +10,7 @@ #include "memmap.h" // add_e820 #include "types.h" // ASM32FLAT #include "util.h" // copy_acpi_rsdp -#include "acpi.h" // find_pmtimer +#include "acpi.h" // find_acpi_features
#define INFO_PHYSICAL_ADDRESS 0x00001000
@@ -125,7 +125,7 @@ void xen_biostable_setup(void) for (i=0; i<info->tables_nr; i++) copy_table(tables[i]);
- find_pmtimer(); + find_acpi_features(); }
void xen_ramsize_preinit(void)
From: David Woodhouse David.Woodhouse@intel.com
Signed-off-by: David Woodhouse David.Woodhouse@intel.com --- src/acpi.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++----------- src/acpi.h | 14 ++++++++++++++ src/resume.c | 3 +++ 3 files changed, 64 insertions(+), 11 deletions(-)
diff --git a/src/acpi.c b/src/acpi.c index 36bd39a..195dc88 100644 --- a/src/acpi.c +++ b/src/acpi.c @@ -151,17 +151,6 @@ struct madt_local_nmi {
/* - * ACPI 2.0 Generic Address Space definition. - */ -struct acpi_20_generic_address { - u8 address_space_id; - u8 register_bit_width; - u8 register_bit_offset; - u8 reserved; - u64 address; -} PACKED; - -/* * HPET Description Table */ struct acpi_20_hpet { @@ -934,4 +923,51 @@ find_acpi_features(void) dprintf(4, "pm_tmr_blk=%x\n", pm_tmr); if (pm_tmr) pmtimer_setup(pm_tmr, 3579); + + // Theoretically we should check the 'reset_reg_sup' flag, but Windows + // doesn't and thus nobody seems to *set* it. If the table is large enough + // to include it, let the sanity checks in acpi_set_reset_reg() suffice. + if (fadt->length >= 129) { + void *p = fadt; + acpi_set_reset_reg(p + 116, *(u8 *)(p + 128)); + } +} + +static struct acpi_20_generic_address acpi_reset_reg; +static u8 acpi_reset_val; + +void +acpi_reboot(void) +{ + // Check it passed the sanity checks in acpi_set_reset_reg() and was set + if (acpi_reset_reg.register_bit_width != 8) + return; + + u64 addr = le64_to_cpu(acpi_reset_reg.address); + + dprintf(1, "ACPI hard reset %d:%llx (%x)\n", + acpi_reset_reg.address_space_id, addr, acpi_reset_val); + + switch (acpi_reset_reg.address_space_id) { + case 0: // System Memory + writeb((void *)(u32)addr, acpi_reset_val); + break; + case 1: // System I/O + outb(acpi_reset_val, addr); + break; + case 2: // PCI config space + pci_config_writeb(acpi_ga_to_bdf(addr), addr & 0xffff, acpi_reset_val); + break; + } +} + +void +acpi_set_reset_reg(struct acpi_20_generic_address *reg, u8 val) +{ + if (!reg || reg->address_space_id > 2 || + reg->register_bit_width != 8 || reg->register_bit_offset) + return; + + acpi_reset_reg = *reg; + acpi_reset_val = val; } diff --git a/src/acpi.h b/src/acpi.h index b23717a..6289953 100644 --- a/src/acpi.h +++ b/src/acpi.h @@ -3,9 +3,23 @@
#include "types.h" // u32
+/* + * ACPI 2.0 Generic Address Space definition. + */ +struct acpi_20_generic_address { + u8 address_space_id; + u8 register_bit_width; + u8 register_bit_offset; + u8 reserved; + u64 address; +} PACKED; +#define acpi_ga_to_bdf(addr) pci_to_bdf(0, (addr >> 32) & 0xffff, (addr >> 16) & 0xffff) + void acpi_setup(void); u32 find_resume_vector(void); void find_acpi_features(void); +void acpi_set_reset_reg(struct acpi_20_generic_address *reg, u8 val); +void acpi_reboot(void);
#define RSDP_SIGNATURE 0x2052545020445352LL // "RSD PTR "
diff --git a/src/resume.c b/src/resume.c index b30d62e..784abac 100644 --- a/src/resume.c +++ b/src/resume.c @@ -132,6 +132,9 @@ tryReboot(void) // Setup for reset on qemu. qemu_prep_reset();
+ // Reboot using ACPI RESET_REG + acpi_reboot(); + // Try keyboard controller reboot. i8042_reboot();
From: David Woodhouse David.Woodhouse@intel.com
The so-called "PCI" reboot at 0xCF9 is supposed to be a hard reset, while the keyboard controller is only a soft reset. So try pci_reboot() first.
Signed-off-by: David Woodhouse David.Woodhouse@intel.com --- src/resume.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/resume.c b/src/resume.c index 784abac..97e20b6 100644 --- a/src/resume.c +++ b/src/resume.c @@ -135,12 +135,12 @@ tryReboot(void) // Reboot using ACPI RESET_REG acpi_reboot();
- // Try keyboard controller reboot. - i8042_reboot(); - // Try PCI 0xcf9 reboot pci_reboot();
+ // Try keyboard controller reboot. + i8042_reboot(); + // Try triple fault asm volatile("int3");
On Sat, Feb 23, 2013 at 12:24:47AM +0000, David Woodhouse wrote:
From: David Woodhouse David.Woodhouse@intel.com
Signed-off-by: David Woodhouse David.Woodhouse@intel.com
Thanks.
I pushed patch 1.
Patch 2 and 3 look okay to me - if there are no further comments I'll push them.
I'm not sure about patch 4. I do think we want to try the "standard" ways before poking at unstandard ports.
-Kevin
On Sat, 2013-02-23 at 10:00 -0500, Kevin O'Connor wrote:
Patch 2 and 3 look okay to me - if there are no further comments I'll push them.
I think we're fairly happy with them. Laszlo put together the OVMF side (creating ACPI 2.0 tables instead of 1.0 and filling in the RESET_REG) and I've tested that it's doing the right thing. We should probably do something similar on the SeaBIOS side when it's creating its own tables, and feed same to acpi_set_reset_reg().
I'm not sure about patch 4. I do think we want to try the "standard" ways before poking at unstandard ports.
Yeah, maybe. Perhaps we could enable that only if PCI devices 8086/7113 or 8086/2918 were found in pci_probe_devices() ?
Then again, if we *do* find those devices then our ACPI tables should have given us 0xcf9 as a RESET_REG. So perhaps patch #4 is superfluous.
On Sat, Feb 23, 2013 at 04:28:06PM +0000, David Woodhouse wrote:
On Sat, 2013-02-23 at 10:00 -0500, Kevin O'Connor wrote:
Patch 2 and 3 look okay to me - if there are no further comments I'll push them.
I think we're fairly happy with them. Laszlo put together the OVMF side (creating ACPI 2.0 tables instead of 1.0 and filling in the RESET_REG) and I've tested that it's doing the right thing. We should probably do something similar on the SeaBIOS side when it's creating its own tables, and feed same to acpi_set_reset_reg().
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I'm not sure about patch 4. I do think we want to try the "standard" ways before poking at unstandard ports.
Yeah, maybe. Perhaps we could enable that only if PCI devices 8086/7113 or 8086/2918 were found in pci_probe_devices() ?
If you want to call acpi_set_reset_reg() from pciinit.c (like the way pmtimer_setup() is called) or something similar, that's fine with me.
-Kevin
On Sat, 2013-02-23 at 11:38 -0500, Kevin O'Connor wrote:
On Sat, Feb 23, 2013 at 04:28:06PM +0000, David Woodhouse wrote:
On Sat, 2013-02-23 at 10:00 -0500, Kevin O'Connor wrote:
Patch 2 and 3 look okay to me - if there are no further comments I'll push them.
I think we're fairly happy with them. Laszlo put together the OVMF side (creating ACPI 2.0 tables instead of 1.0 and filling in the RESET_REG) and I've tested that it's doing the right thing. We should probably do something similar on the SeaBIOS side when it's creating its own tables, and feed same to acpi_set_reset_reg().
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
If you want to call acpi_set_reset_reg() from pciinit.c (like the way pmtimer_setup() is called) or something similar, that's fine with me.
If we actually *created* ACPI 2.0 tables with a RESET_REG I'd have done so already. As it is, I'm happy to wait until we do so, or until we get the tables from Qemu.
David Woodhouse wrote:
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
Put them in coreboot.
//Peter
On Sat, Feb 23, 2013 at 06:02:38PM +0100, Peter Stuge wrote:
David Woodhouse wrote:
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
Put them in coreboot.
That won't help - coreboot would be in the same situation. The QEMU hardware is dynamic, and it's very hard for any guest to create tables that describe all the possible variants. That's why I think QEMU should create the tables directly - it's the only place that has all the info on the hardware being emulated.
-Kevin
On Sat, Feb 23, 2013 at 04:47:26PM +0000, David Woodhouse wrote:
On Sat, 2013-02-23 at 11:38 -0500, Kevin O'Connor wrote:
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
Laszlo, what is your plan for doing this?
I did a review of the SeaBIOS code to see what information is currently used to generate the ACPI, SMBIOS, MPTABLE, and PIR bios tables. Here's what I came up with:
- hardcoded information: Most of the tables are simply hardcoded with various values. This should not be a problem to move to QEMU
- information passed in from QEMU: RamSize, RamSizeOver4G, fw_cfg (irq0-override, system suspend states, numa memory, additional acpi tables, smbios overrides). These should also be possible to obtain directly within QEMU (though I'm unsure how qemu exposes this information internally).
- CPU information: Number of CPUs, the apic id of the CPUs, which CPUs are active, and the cpuid information from the first CPU. Again this should be available in QEMU, but I'm not sure what the internal interfaces look like for obtaining it.
- Various hardware probes: The ioapic version, whether or not hpet is present, running on piix4 or ich9, whether or not acpi should be used. Again should be possible to obtain from QEMU with sufficient interfaces.
- PCI device info: The list of PCI devices, PCI buses, pin assignments, irq assignments, if hotplug supported, and memory regions. This should mostly be available in QEMU - order of initializing would be important so that the tables were initialized after all PCI devices.
Of these, the only thing I see that could be problematic is the PCI irq assignments (used in mptable) and the PCI region space (used in ACPI DSDT _SB.PCI.CRS). These are slightly problematic as they currently rely somewhat on the current SeaBIOS pciinit.c bridge/device setup. However, the mptable irqs is a simple algorithm that could be replicated in QEMU, and it looks to be of dubious value anyway (so could possibly be dropped from the mptable). Also, the PCI region space does not need to be exact, so a heuristic that just ensured it was large enough should suffice.
Given this, one possible way to migrate the ACPI tables from SeaBIOS would be to:
1 - replace the BDAT PCI range interface in SeaBIOS with a SSDT based template system similar to the way software suspend states are handled in SeaBIOS today. This would eliminate the only runtime references to SeaBIOS memory from ACPI.
2 - relicense the SeaBIOS' acpi.c, mptable.c, pirtable.c, smbios.c code to GPLv2 (from LGPLv3) and copy into QEMU. Only I've claimed a copyright since Fabrice's work (LGPLv2) and I'm willing to relicense. There have been a handful of contributors to these files, but they all look to be regular QEMU contributors so I don't think there would be any objections. Along with the code, the IASL parsing code and associated build python scripts would also need to be copied into QEMU.
3 - update the code to use the internal QEMU interfaces instead of the SeaBIOS interfaces to obtain the information outlined above.
4 - pass the tables from QEMU to SeaBIOS via the fw_cfg interface. The PIR, MPTABLE, and SMBIOS are easy to copy into memory from fw_cfg. The ACPI does have a few tables that are special (RSDP, RSDT, FADT, DSDT, FACS), but it should be easy to detect these and update the pointers in SeaBIOS during the copy to memory.
Thoughts?
-Kevin
On Sun, Feb 24, 2013 at 01:00:28PM -0500, Kevin O'Connor wrote:
On Sat, Feb 23, 2013 at 04:47:26PM +0000, David Woodhouse wrote:
On Sat, 2013-02-23 at 11:38 -0500, Kevin O'Connor wrote:
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
Laszlo, what is your plan for doing this?
I did a review of the SeaBIOS code to see what information is currently used to generate the ACPI, SMBIOS, MPTABLE, and PIR bios tables. Here's what I came up with:
- hardcoded information: Most of the tables are simply hardcoded with various values. This should not be a problem to move to QEMU
IIRC SMBIOS has some tables with information about a BIOS.
information passed in from QEMU: RamSize, RamSizeOver4G, fw_cfg (irq0-override, system suspend states, numa memory, additional acpi tables, smbios overrides). These should also be possible to obtain directly within QEMU (though I'm unsure how qemu exposes this information internally).
CPU information: Number of CPUs, the apic id of the CPUs, which CPUs are active, and the cpuid information from the first CPU. Again this should be available in QEMU, but I'm not sure what the internal interfaces look like for obtaining it.
Various hardware probes: The ioapic version, whether or not hpet is present, running on piix4 or ich9, whether or not acpi should be used. Again should be possible to obtain from QEMU with sufficient interfaces.
PCI device info: The list of PCI devices, PCI buses, pin assignments, irq assignments, if hotplug supported, and memory regions. This should mostly be available in QEMU - order of initializing would be important so that the tables were initialized after all PCI devices.
Of these, the only thing I see that could be problematic is the PCI irq assignments (used in mptable) and the PCI region space (used in ACPI DSDT _SB.PCI.CRS). These are slightly problematic as they currently rely somewhat on the current SeaBIOS pciinit.c bridge/device setup. However, the mptable irqs is a simple algorithm that could be replicated in QEMU, and it looks to be of dubious value anyway (so could possibly be dropped from the mptable). Also, the PCI region space does not need to be exact, so a heuristic that just ensured it was large enough should suffice.
Again IIRC there are still OSes that use mptable to obtain irq information. See 928d4dffef5c374.
Given this, one possible way to migrate the ACPI tables from SeaBIOS would be to:
1 - replace the BDAT PCI range interface in SeaBIOS with a SSDT based template system similar to the way software suspend states are handled in SeaBIOS today. This would eliminate the only runtime references to SeaBIOS memory from ACPI.
2 - relicense the SeaBIOS' acpi.c, mptable.c, pirtable.c, smbios.c code to GPLv2 (from LGPLv3) and copy into QEMU. Only I've claimed a copyright since Fabrice's work (LGPLv2) and I'm willing to relicense. There have been a handful of contributors to these files, but they all look to be regular QEMU contributors so I don't think there would be any objections. Along with the code, the IASL parsing code and associated build python scripts would also need to be copied into QEMU.
3 - update the code to use the internal QEMU interfaces instead of the SeaBIOS interfaces to obtain the information outlined above.
4 - pass the tables from QEMU to SeaBIOS via the fw_cfg interface. The PIR, MPTABLE, and SMBIOS are easy to copy into memory from fw_cfg. The ACPI does have a few tables that are special (RSDP, RSDT, FADT, DSDT, FACS), but it should be easy to detect these and update the pointers in SeaBIOS during the copy to memory.
Thoughts?
-Kevin
-- Gleb.
Il 25/02/2013 09:51, Gleb Natapov ha scritto:
Of these, the only thing I see that could be problematic is the PCI irq assignments (used in mptable) and the PCI region space (used in ACPI DSDT _SB.PCI.CRS). These are slightly problematic as they currently rely somewhat on the current SeaBIOS pciinit.c bridge/device setup. However, the mptable irqs is a simple algorithm that could be replicated in QEMU, and it looks to be of dubious value anyway (so could possibly be dropped from the mptable). Also, the PCI region space does not need to be exact, so a heuristic that just ensured it was large enough should suffice.
Again IIRC there are still OSes that use mptable to obtain irq information. See 928d4dffef5c374.
It should work to use a fixed mptable that overrides interrupts 5/9/10/11, like we do in the MADT. It doesn't need to be just the interrupts that are in use.
Paolo
On Mon, Feb 25, 2013 at 10:51:55AM +0200, Gleb Natapov wrote:
On Sun, Feb 24, 2013 at 01:00:28PM -0500, Kevin O'Connor wrote:
I did a review of the SeaBIOS code to see what information is currently used to generate the ACPI, SMBIOS, MPTABLE, and PIR bios tables. Here's what I came up with:
- hardcoded information: Most of the tables are simply hardcoded with various values. This should not be a problem to move to QEMU
IIRC SMBIOS has some tables with information about a BIOS.
Most of the fields in these tables describe hardware, but you are correct that a few fields describe the firmware. After a quick review on the SeaBIOS code, I came up with 3 firmware fields populated today: SMBIOS type 1 bios_starting_address_segment, and ACPI FADT acpi_enable/acpi_disable fields. The bios_starting_address_segment is given a bogus value (0xe800) today and I think we can continue to do that. (There are a couple of other firmware specific fields in the SMBIOS type 1 struct (bios_characteristics), but they aren't populated today anyway.) For the SMI acpi_enable/disable fields we can update the SMI handler to use the values found in the ACPI tables. (Indeed, the q35 support is already broken here because it declares values that are different from what the SMI handler is expecting.)
Of these, the only thing I see that could be problematic is the PCI irq assignments (used in mptable) and the PCI region space (used in ACPI DSDT _SB.PCI.CRS). These are slightly problematic as they currently rely somewhat on the current SeaBIOS pciinit.c bridge/device setup. However, the mptable irqs is a simple algorithm that could be replicated in QEMU, and it looks to be of dubious value anyway (so could possibly be dropped from the mptable). Also, the PCI region space does not need to be exact, so a heuristic that just ensured it was large enough should suffice.
Again IIRC there are still OSes that uses mptable to obtain irq information. See 928d4dffef5c374.
Well, it shouldn't be needed for buses other than the root bus. (Today, SeaBIOS will add an mptable entry for every PCI-PCI bridge, and I don't think that is correct or useful.) The other info isn't too painful to put in QEMU - we just need the default PIN# -> IRQ mapping, which can be described with "const u8 pci_irqs[4] = { 10, 10, 11, 11 };".
-Kevin
On 02/24/13 19:00, Kevin O'Connor wrote:
On Sat, Feb 23, 2013 at 04:47:26PM +0000, David Woodhouse wrote:
On Sat, 2013-02-23 at 11:38 -0500, Kevin O'Connor wrote:
IMO, we need to move the ACPI table creation (and PIR/MPTABLE/SMBIOS) to QEMU and just have QEMU pass the tables to SeaBIOS for it to copy into memory like it does on CSM, coreboot, and Xen.
I believe it's on Laszlo's TODO list.
Laszlo, what is your plan for doing this?
Didn't have much of a plan until now, just "look into it".
It seems quite a bit of work (I expect many resubmits to qemu-devel) and I think I'd prefer to start working on it no earlier than March 18th. (Of course if anyone else implements it by then I'll be happy :))
I did a review of the SeaBIOS code to see what information is currently used to generate the ACPI, SMBIOS, MPTABLE, and PIR bios tables. Here's what I came up with:
hardcoded information: Most of the tables are simply hardcoded with various values. This should not be a problem to move to QEMU
information passed in from QEMU: RamSize, RamSizeOver4G, fw_cfg (irq0-override, system suspend states, numa memory, additional acpi tables, smbios overrides). These should also be possible to obtain directly within QEMU (though I'm unsure how qemu exposes this information internally).
In the long term I believe everything should be passed as fw_cfg files, one file per table. I'm not sure about the naming convention, but probably something like "acpi/SSDT".
This already seems quite messy. For example, acpi-dsdt.aml is built as part of SeaBIOS, then installed on the filesystem with qemu.
Qemu can load manually specified ACPI tables from files, with the -acpitable switch:
do_acpitable_option() [arch_init.c] acpi_table_add() [hw/acpi.c]
If no such option is specified, it auto-loads acpi-dsdt.aml (I'm ignoring q35 for now).
Then the loaded tables are all exported under one fw_cfg key:
pc_init1() [hw/pc_piix.c] pc_acpi_init() [hw/pc.c] acpi_table_add() [hw/acpi.c] pc_memory_init() [hw/pc.c] bochs_bios_init() fw_cfg_add_bytes(..., FW_CFG_ACPI_TABLES, acpi_tables, ...)
SeaBIOS then splits/"relabels" this single blob into sub-blobs,
qemu_cfg_legacy() loop qemu_romfile_add("acpi/table%d", QEMU_CFG_ACPI_TABLES, offset, len)
Then eg. the DSDT is installed in
qemu_platform_setup() acpi_setup() romfile_findprefix() qemu_cfg_read_file() via funcptr fill_dsdt()
So it's a seabios-qemu-seabios ping-pong.
At first I would export the ACPI table in qemu (install the fw_cfg file) in the same spot where currently the corresponding "base info" is prepared for SeaBIOS. If a table in SeaBIOS is currently built from several fw_cfg sources, then I'd probably export the qemu replacement in the latest "base info" spot, verifying if I can still collect earlier pieces of "base info".
I think we should move forward table-wise... each could take a separate series.
Don't know what to do with the -acpitable switch though. A mixture of loaded and autogenerated tables promises trouble.
- CPU information: Number of CPUs, the apic id of the CPUs, which CPUs are active, and the cpuid information from the first CPU. Again this should be available in QEMU, but I'm not sure what the internal interfaces look like for obtaining it.
I'd just look at what the fw_cfg info is composed from, and re-use it.
Various hardware probes: The ioapic version, whether or not hpet is present, running on piix4 or ich9, whether or not acpi should be used. Again should be possible to obtain from QEMU with sufficient interfaces.
PCI device info: The list of PCI devices, PCI buses, pin assignments, irq assignments, if hotplug supported, and memory regions. This should mostly be available in QEMU - order of initializing would be important so that the tables were initialized after all PCI devices.
Of these, the only thing I see that could be problematic is the PCI irq assignments (used in mptable) and the PCI region space (used in ACPI DSDT _SB.PCI.CRS). These are slightly problematic as they currently rely somewhat on the current SeaBIOS pciinit.c bridge/device setup. However, the mptable irqs is a simple algorithm that could be replicated in QEMU, and it looks to be of dubious value anyway (so could possibly be dropped from the mptable). Also, the PCI region space does not need to be exact, so a heuristic that just ensured it was large enough should suffice.
Without the CRS stuff efifb wasn't working in OVMF-based guests, so I already had to implement a similar search in OVMF (with ample guidance from Gerd & others of course). The series is archived under
http://thread.gmane.org/gmane.comp.bios.tianocore.devel/81
The interesting commits are: http://tianocore.git.sourceforge.net/git/gitweb.cgi?p=tianocore/edk2;a=commi... http://tianocore.git.sourceforge.net/git/gitweb.cgi?p=tianocore/edk2;a=commi...
The search iterates over the memory map (below 4GB), determines the highest reserved/system memory address, plus the smallest common "bounding box" for all mmio ranges. Then this "bounding box" is clamped up by the highest RAM address, and the result (if not the empty set) is communicated via the same BDAT method. (The struct is allocated from the "reserved pool".)
I guess this same iteration could be done inside qemu, using the memory region API. The "info mtree" implementation could be a good example. (See mtree_info() in "memory.c".)
Given this, one possible way to migrate the ACPI tables from SeaBIOS would be to:
1 - replace the BDAT PCI range interface in SeaBIOS with a SSDT based template system similar to the way software suspend states are handled in SeaBIOS today. This would eliminate the only runtime references to SeaBIOS memory from ACPI.
I've made peace with generating AML in C source.
2 - relicense the SeaBIOS' acpi.c, mptable.c, pirtable.c, smbios.c code to GPLv2 (from LGPLv3) and copy into QEMU. Only I've claimed a copyright since Fabrice's work (LGPLv2) and I'm willing to relicense.
That's very generous of you, thank you.
There have been a handful of contributors to these files, but they all look to be regular QEMU contributors so I don't think there would be any objections. Along with the code, the IASL parsing code and associated build python scripts would also need to be copied into QEMU.
This is one area where I expect many stylistic remarks...
Plus, although python is already used in the qemu build process (to generate C source from qapi schema, at least), "iasl" is not yet a direct dependency of qemu.
3 - update the code to use the internal QEMU interfaces instead of the SeaBIOS interfaces to obtain the information outlined above.
I believe rather than "copy + update" it'd be "read + rewrite" (which of course still depends on you relicensing the SeaBIOS basis).
4 - pass the tables from QEMU to SeaBIOS via the fw_cfg interface. The PIR, MPTABLE, and SMBIOS are easy to copy into memory from fw_cfg. The ACPI does have a few tables that are special (RSDP, RSDT, FADT, DSDT, FACS), but it should be easy to detect these and update the pointers in SeaBIOS during the copy to memory.
That's how ACPI tables are installed in OVMF / edk2 as well; linked tables are special-cased and pointers (and checksums) are set on the fly.
Thanks Laszlo
Laszlo Ersek wrote:
I've made peace with generating AML in C source.
As it happens, coreboot has a good infrastructure for generating AML at runtime since years already.
Of course static tables in coreboot are no better than static tables elsewhere. There are two reasons why moving all this complexity into coreboot makes sense:
1. Significant amounts of code can quite likely be shared between many different hypervisors, since coreboot already shares significant code between many different hardware platforms, never mind the reuse possible across *both* hypervisors and hardware.
2. Having (many!) hypervisor-specific special cases in SeaBIOS seems wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
I understand that noone really cares about those arguments as long as I don't do their work for them, but I'm afraid I will not stop complaining as long as SeaBIOS grows with more and more stuff that has nothing to do with a BIOS environment but has to do with lower level platform init. Maybe someday someone will actually get the point..
//Peter
On 02/25/13 14:43, Peter Stuge wrote:
- Significant amounts of code can quite likely be shared between
many different hypervisors, since coreboot already shares significant code between many different hardware platforms, never mind the reuse possible across *both* hypervisors and hardware.
Not really. Virtual hardware can be reconfigured in ways which are impossible on real hardware. This is (partly) where the complexity we have in seabios wrt. acpi comes from.
- Having (many!) hypervisor-specific special cases in SeaBIOS seems
wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
It's a tradeoff. On one hand letting coreboot handle hardware initialization would reduce the amount of code in seabios we have to maintain. On the other hand adding coreboot as middle man between qemu and seabios would add some complexity to the whole mix.
I'm not convinced using coreboot is a clear win, especially with EFI coming. Can coreboot run tianocore as payload?
I understand that noone really cares about those arguments as long as I don't do their work for them,
If using coreboot would be a clear and obvious win someone would have done that work already.
ACPI not working at all in linux guests when using coreboot with seabios payload doesn't exactly encourage exploring that option btw.
but I'm afraid I will not stop complaining as long as SeaBIOS grows with more and more stuff that has nothing to do with a BIOS environment but has to do with lower level platform init.
Well, *this* discussion is about moving stuff *out* of seabios.
Maybe someday someone will actually get the point..
I figured long ago which point you are trying to make. I don't agree though.
cheers, Gerd
On Mon, 2013-02-25 at 15:46 +0100, Gerd Hoffmann wrote:
I'm not convinced using coreboot is a clear win, especially with EFI coming. Can coreboot run tianocore as payload?
It's being worked on.
Gerd Hoffmann wrote:
- Significant amounts of code can quite likely be shared between
many different hypervisors, since coreboot already shares significant code between many different hardware platforms, never mind the reuse possible across *both* hypervisors and hardware.
Not really.
Yes, really.
Virtual hardware can be reconfigured in ways which are impossible on real hardware. This is (partly) where the complexity we have in seabios wrt. acpi comes from.
Yes. And the more flexibility is required the more complex the code gets. It sounds like there will soon be need for a more generic PCI resource allocator, which is another thing that coreboot already has.
- Having (many!) hypervisor-specific special cases in SeaBIOS seems
wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
It's a tradeoff. On one hand letting coreboot handle hardware initialization would reduce the amount of code in seabios we have to maintain. On the other hand adding coreboot as middle man between qemu and seabios would add some complexity to the whole mix.
What complexities have you run into?
coreboot can of course be improved further, but as you may know SeaBIOS gets built by default by the coreboot build process already, so using coreboot wouldn't even add extra steps for a manual build.
I'm not convinced using coreboot is a clear win, especially with EFI coming. Can coreboot run tianocore as payload?
Work is ongoing to make edk2 a good coreboot payload. It already works for some values of works, but more work is needed. Progress has been fast the last month or so, thanks to efforts by David and Patrick Georgi.
ACPI not working at all in linux guests when using coreboot with seabios payload doesn't exactly encourage exploring that option btw.
Then the way the QEMU mainboard does ACPI in coreboot needs fixing, which is quite possible because I don't know if someone has actually implemented ACPI at all for QEMU, and if so it is not likely using the more modern facilities but likely to have static ASL. The point is not what is already there, the point is that adding this stuff into SeaBIOS or QEMU for that matter would mean re-inventing *yet another* wheel which is *already* finished in coreboot.
but I'm afraid I will not stop complaining as long as SeaBIOS grows with more and more stuff that has nothing to do with a BIOS environment but has to do with lower level platform init.
Well, *this* discussion is about moving stuff *out* of seabios.
Good point, but it seems to be about moving stuff into each respective hypervisor, when in fact much of that code could probably be common in coreboot without significant effort.
//Peter
Hi,
gets. It sounds like there will soon be need for a more generic PCI resource allocator, which is another thing that coreboot already has.
--verbose please.
- Having (many!) hypervisor-specific special cases in SeaBIOS seems
wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
It's a tradeoff. On one hand letting coreboot handle hardware initialization would reduce the amount of code in seabios we have to maintain. On the other hand adding coreboot as middle man between qemu and seabios would add some complexity to the whole mix.
What complexities have you run into?
You have three pieces of software now which must correctly work hand in hand instead of just two.
coreboot can of course be improved further, but as you may know SeaBIOS gets built by default by the coreboot build process already, so using coreboot wouldn't even add extra steps for a manual build.
I'm more concerned about development and debugging than about the build process.
The build process isn't that great IMHO (although that is probably fixable without too much effort). coreboot fetching seabios as git submodule might be convenient for developers. For package building it sucks big time, you certainly don't want your package builder clone seabios.git each time coreboot is built. And if you flip CONFIG_SEABIOS to "no" to fix that (then add prebuilt seabios.elf via cbfstool once the build is finished) suddenly a bunch of config options get different default values ...
I'm not convinced using coreboot is a clear win, especially with EFI coming. Can coreboot run tianocore as payload?
Work is ongoing to make edk2 a good coreboot payload. It already works for some values of works, but more work is needed. Progress has been fast the last month or so, thanks to efforts by David and Patrick Georgi.
I'll have a look.
payloads/tianocoreboot/README hints that this works for Ia32 only atm. Is that true or just an incomplete README?
Can tianocore grab acpi tables from coreboot?
ACPI not working at all in linux guests when using coreboot with seabios payload doesn't exactly encourage exploring that option btw.
Then the way the QEMU mainboard does ACPI in coreboot needs fixing, which is quite possible because I don't know if someone has actually implemented ACPI at all for QEMU, and if so it is not likely using the more modern facilities but likely to have static ASL. The point is not what is already there, the point is that adding this stuff into SeaBIOS or QEMU for that matter would mean re-inventing *yet another* wheel which is *already* finished in coreboot.
Yes, the infrastructure is there. Which helps of course. But qemu support certainly isn't finished. It is broken.
but I'm afraid I will not stop complaining as long as SeaBIOS grows with more and more stuff that has nothing to do with a BIOS environment but has to do with lower level platform init.
Well, *this* discussion is about moving stuff *out* of seabios.
Good point, but it seems to be about moving stuff into each respective hypervisor, when in fact much of that code could probably be common in coreboot without significant effort.
When moving into qemu it will be shared too as both opensource hypervisors (xen+kvm) use qemu, so there isn't duplication.
Generating the tables in qemu will kill the pain point of having to pass the relevant configuration information from qemu to seabios/tianocore/coreboot/whatever. It will also make sure the acpi tables are in sync with qemu's virtual hardware.
cheers, Gerd
On Tue, 2013-02-26 at 10:03 +0100, Gerd Hoffmann wrote:
Generating the tables in qemu will … make sure the acpi tables are in sync with qemu's virtual hardware.
Which would, for example, have saved us some of the recent soul-searching about whether SeaBIOS/OVMF should put 0xcf9 into the ACPI RESET_REG given that it can't know whether it's running on a version of qemu which has actually implemented it...
On 02/26/13 10:03, Gerd Hoffmann wrote:
Can tianocore grab acpi tables from coreboot?
Not that I know of. (... It may have been a rhetorical question.)
When running on Xen, an area is searched for the RSDP, and linked tables (prepared by Xen's hvmloader I think) are installed by OVMF.
When running on qemu, static tables are used. I strived to adapt them as much as I could to qemu hardware. Some tables (recognized by header) are copied & modified dynamically before installation: MADT, SSDT.
Getting a precise picture of qemu hardware in the form of ACPI tables (over fw_cfg) would not only allow OVMF to pass on better tables to OSPM; OVMF could *maybe* use them itself. They might allow us to remove hard-wired BDFs, ports, ranges etc. in platform initialization (OvmfPkg/PlatformPei) and even in boot device selection (OvmfPkg/Library/PlatformBdsLib/BdsPlatform.c).
Laszlo
On 02/26/13 19:30, Laszlo Ersek wrote:
On 02/26/13 10:03, Gerd Hoffmann wrote:
Can tianocore grab acpi tables from coreboot?
Not that I know of. (... It may have been a rhetorical question.)
Wasn't rhetorical. Generating the acpi tables in both ovmf and seabios doesn't make that much sense, better would be to have that in one place.
Option one is to let qemu provide them, then both ovmf and seabios can grab them via fw_cfg.
Option two is to use coreboot underneath, then let both seabios and ovmf grab the tables from coreboot. Which of course requires that ovmf can use the tables provided by coreboot. Seabios can do that today.
/me looked around a bit in the coreboot source code. Looks like they simply copied over the dsdt from seabios as-is, which doesn't fly due to the pci _crs method referencing ssdt's BDAT.
From the quick look it seems they do *not* generate the dsdt
dynamically, only the other tables (similar to seabios). So switching to coreboot probably doesn't help to remove the dsdt patching code we have in seabios.
When running on Xen, an area is searched for the RSDP, and linked tables (prepared by Xen's hvmloader I think) are installed by OVMF.
Ok, so doing something similar when running on coreboot should be possible without major headache.
When running on qemu, static tables are used. I strived to adapt them as much as I could to qemu hardware. Some tables (recognized by header) are copied & modified dynamically before installation: MADT, SSDT.
Getting a precise picture of qemu hardware in the form of ACPI tables (over fw_cfg) would not only allow OVMF to pass on better tables to OSPM; OVMF could *maybe* use them itself.
qemu 1.4+ places the dsdt into fw_cfg. But again, that is the one which references BDAT, so using it as-is in ovmf isn't going to fly ...
cheers, Gerd
On 02/27/13 10:19, Gerd Hoffmann wrote:
On 02/26/13 19:30, Laszlo Ersek wrote:
On 02/26/13 10:03, Gerd Hoffmann wrote:
Can tianocore grab acpi tables from coreboot?
Not that I know of. (... It may have been a rhetorical question.)
Wasn't rhetorical. Generating the acpi tables in both ovmf and seabios doesn't make that much sense, better would be to have that in one place.
Option one is to let qemu provide them, then both ovmf and seabios can grab them via fw_cfg.
Option two is to use coreboot underneath, then let both seabios and ovmf grab the tables from coreboot. Which of course requires that ovmf can use the tables provided by coreboot. Seabios can do that today.
/me looked around a bit in the coreboot source code. Looks like they simply copied over the dsdt from seabios as-is, which doesn't fly due to the pci _crs method referencing ssdt's BDAT.
From the quick look it seems they do *not* generate the dsdt dynamically, only the other tables (similar to seabios). So switching to coreboot probably doesn't help to remove the dsdt patching code we have in seabios.
When running on Xen, an area is searched for the RSDP, and linked tables (prepared by Xen's hvmloader I think) are installed by OVMF.
Ok, so doing something similar when running on coreboot should be possible without major headache.
When running on qemu, static tables are used. I strived to adapt them as much as I could to qemu hardware. Some tables (recognized by header) are copied & modified dynamically before installation: MADT, SSDT.
Getting a precise picture of qemu hardware in the form of ACPI tables (over fw_cfg) would not only allow OVMF to pass on better tables to OSPM; OVMF could *maybe* use them itself.
qemu 1.4+ places the dsdt into fw_cfg. But again, that is the one which references BDAT, so using it as-is in ovmf isn't going to fly ...
As far as I understood Kevin suggested to - remove the BDAT (FWDT in OVMF) and the related DSDT logic, - prepare the byte-code of the CRS dynamically from a template (it could be a Name instead of a Method) and patch the ranges before installation.
Laszlo
On Wed, Feb 27, 2013 at 03:09:25PM +0100, Laszlo Ersek wrote:
On 02/27/13 10:19, Gerd Hoffmann wrote:
On 02/26/13 19:30, Laszlo Ersek wrote:
On 02/26/13 10:03, Gerd Hoffmann wrote:
Can tianocore grab acpi tables from coreboot?
Not that I know of. (... It may have been a rhetorical question.)
Wasn't rhetorical. Generating the acpi tables in both ovmf and seabios doesn't make that much sense, better would be to have that in one place.
Option one is to let qemu provide them, then both ovmf and seabios can grab them via fw_cfg.
Option two is to use coreboot underneath, then let both seabios and ovmf grab the tables from coreboot. Which of course requires that ovmf can use the tables provided by coreboot. Seabios can do that today.
/me looked around a bit in the coreboot source code. Looks like they simply copied over the dsdt from seabios as-is, which doesn't fly due to the pci _crs method referencing ssdt's BDAT.
From the quick look it seems they do *not* generate the dsdt dynamically, only the other tables (similar to seabios). So switching to coreboot probably doesn't help to remove the dsdt patching code we have in seabios.
When running on Xen, an area is searched for the RSDP, and linked tables (prepared by Xen's hvmloader I think) are installed by OVMF.
Ok, so doing something similar when running on coreboot should be possible without major headache.
When running on qemu, static tables are used. I strived to adapt them as much as I could to qemu hardware. Some tables (recognized by header) are copied & modified dynamically before installation: MADT, SSDT.
Getting a precise picture of qemu hardware in the form of ACPI tables (over fw_cfg) would not only allow OVMF to pass on better tables to OSPM; OVMF could *maybe* use them itself.
qemu 1.4+ places the dsdt into fw_cfg. But again, that is the one which references BDAT, so using it as-is in ovmf isn't going to fly ...
As far as I understood Kevin suggested to
- remove the BDAT (FWDT in OVMF) and the related DSDT logic,
- prepare the byte-code of the CRS dynamically from a template (it could
be a Name instead of a Method) and patch the ranges before installation.
My suggestion was to replace BDAT with a dynamically generated P0S (and P[01][SLE][LH]?) in an SSDT (by updating ssdt-sups.dsl).
-Kevin
Gerd Hoffmann wrote:
Option one is to let qemu provide them, then both ovmf and seabios can grab them via fw_cfg.
Option two is to use coreboot underneath
I don't think one should exclude the other, I think it would make great sense to combine them. So have coreboot on QEMU read some hardware description from QEMU and use that either as input to a table generator, or even have the read data be the tables themselves.
It might or might not be easier and/or make more sense to generate tables in coreboot rather than in QEMU - that's unclear so far.
From the quick look it seems they do *not* generate the dsdt dynamically, only the other tables (similar to seabios). So switching to coreboot probably doesn't help to remove the dsdt patching code we have in seabios.
Is there something inherent to the AML generator code in coreboot which makes it suck for the purpose of also generating a DSDT?
//Peter
On 02/28/13 06:23, Peter Stuge wrote:
Gerd Hoffmann wrote:
Option one is to let qemu provide them, then both ovmf and seabios can grab them via fw_cfg.
Option two is to use coreboot underneath
I don't think one should exclude the other, I think it would make great sense to combine them. So have coreboot on QEMU read some hardware description from QEMU and use that either as input to a table generator, or even have the read data be the tables themselves.
Yes, sure, if we let qemu generate the tables coreboot can just grab+use them too. But the coreboot table generator code wouldn't be used then ...
From the quick look it seems they do *not* generate the dsdt dynamically, only the other tables (similar to seabios). So switching to coreboot probably doesn't help to remove the dsdt patching code we have in seabios.
Is there something inherent to the AML generator code in coreboot which makes it suck for the purpose of also generating a DSDT?
I think it is a lot of work to get that going.
Typically the dsdt is static while any dynamic stuff is placed in a (generated) ssdt and is just referenced from the dsdt. coreboot is not different here, consequently it has ready-to-go code to generate the stuff typically found in ssdt (and other) tables. But support for generating dsdt constructs simply isn't there (or very limited, some resource template bits seem to be there because those are used in ssdts too).
cheers, Gerd
On Mon, 2013-02-25 at 15:46 +0100, Gerd Hoffmann wrote:
- Having (many!) hypervisor-specific special cases in SeaBIOS seems
wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
It's a tradeoff. On one hand letting coreboot handle hardware initialization would reduce the amount of code in seabios we have to maintain. On the other hand adding coreboot as middle man between qemu and seabios would add some complexity to the whole mix.
But if we do it *without* coreboot, then we get to reimplement the whole "seabios-qemu-seabios ping-pong", as Laszlo describes it, in Tianocore as *well* as SeaBIOS. I'm not sure we really want to duplicate that code, which looks like it will have tricky interactions between host and guest. When viewed that way, it's not clear that doing it in coreboot is really adding complexity; in that respect it *simplifies* things a bit.
Using coreboot everywhere sounds good to me. Especially because on the Tianocore side we can push for Patrick's CorebootPkg to be the *primary* platform; we could even consider deprecating the qemu-specific OvmfPkg completely. And *that* in turn ensures that what everyone's working on is something that ought to be suitable for real hardware, rather than just qemu.
It would also help some people on the UEFI side to get over their bizarre misconception that coreboot is antithetical to UEFI :)
I'd be quite happy to get to the point where the default firmware for Qemu is Coreboot + Tianocore + SeaBIOS (as CSM).
On 02/28/13 10:37, David Woodhouse wrote:
On Mon, 2013-02-25 at 15:46 +0100, Gerd Hoffmann wrote:
- Having (many!) hypervisor-specific special cases in SeaBIOS seems
wildly schizophrenic without bringing any significant benefits, compared to factoring all of that out into a codebase which *already does many of the needed things*.
It's a tradeoff. On one hand letting coreboot handle hardware initialization would reduce the amount of code in seabios we have to maintain. On the other hand adding coreboot as middle man between qemu and seabios would add some complexity to the whole mix.
But if we do it *without* coreboot, then we get to reimplement the whole "seabios-qemu-seabios ping-pong", as Laszlo describes it, in Tianocore as *well* as SeaBIOS.
A good part of that "ping pong" is for acpi table generation. That we don't want to duplicate *that* in both tianocore and seabios is pretty clear. So the question is whether we want to move acpi table generation to coreboot or to qemu?
The advantage of moving it to coreboot is that we already have some table generation infrastructure there.
The advantage of moving it to qemu is that we can easily keep acpi tables in sync with the virtual hardware. We need less communication between qemu + firmware. We gain flexibility: All firmware (seabios / tianocore / coreboot) can pick up & use the tables. That way we should get a smooth migration path from pure seabios to coreboot+seabios (or coreboot+tianocore+seabios), can switch firmware images for regression testing etc.
Using coreboot everywhere sounds good to me. Especially because on the Tianocore side we can push for Patrick's CorebootPkg to be the *primary* platform; we could even consider deprecating the qemu-specific OvmfPkg completely. And *that* in turn ensures that what everyone's working on is something that ought to be suitable for real hardware, rather than just qemu.
Makes sense, but is quite some way to go.
cheers, Gerd
Il 25/02/2013 14:22, Laszlo Ersek ha scritto:
There have been a handful of contributors to these files, but they all look to be regular QEMU contributors so I don't think there would be any objections. Along with the code, the IASL parsing code and associated build python scripts would also need to be copied into QEMU.
This is one area where I expect many stylistic remarks...
Plus, although python is already used in the qemu build process (to generate C source from qapi schema, at least), "iasl" is not yet a direct dependency of qemu.
No big deal, firmware files are kept in the repository in compiled form. So adding new dependencies is not a problem.
Paolo