[coreboot-gerrit] Patch set updated for coreboot: soc/intel/common: Add SMM support

Hannah Williams (hannah.williams@intel.com) gerrit at coreboot.org
Mon Apr 11 23:13:01 CEST 2016


Hannah Williams (hannah.williams at intel.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/14290

-gerrit

commit fb76d439403c0345c4bf63bf90811cb55a517d5b
Author: Ravi Sarawadi <ravishankar.sarawadi at intel.com>
Date:   Mon Apr 4 17:04:11 2016 -0700

    soc/intel/common: Add SMM support
    
    Add SoC-agnostic SMM feature support components.
    
    Change-Id: I3188481e175936ff1f9f0a35ed5b123bb0fe8397
    Signed-off-by: Ravi Sarawadi <ravishankar.sarawadi at intel.com>
---
 src/soc/intel/common/Kconfig       |   4 +
 src/soc/intel/common/Makefile.inc  |   7 +
 src/soc/intel/common/smi.c         |  96 ++++++++
 src/soc/intel/common/smihandler.c  | 409 ++++++++++++++++++++++++++++++++++
 src/soc/intel/common/smmrelocate.c | 439 +++++++++++++++++++++++++++++++++++++
 5 files changed, 955 insertions(+)

diff --git a/src/soc/intel/common/Kconfig b/src/soc/intel/common/Kconfig
index 8b68aad..5ffb945 100644
--- a/src/soc/intel/common/Kconfig
+++ b/src/soc/intel/common/Kconfig
@@ -59,4 +59,8 @@ config MMA_BLOBS_PATH
 	depends on MMA
 	default "3rdparty/blobs/mainboard/$(MAINBOARDDIR)/mma"
 
+config SMM_COMMON
+	bool
+	default n
+
 endif # SOC_INTEL_COMMON
diff --git a/src/soc/intel/common/Makefile.inc b/src/soc/intel/common/Makefile.inc
index a7218b7..edda9e2 100644
--- a/src/soc/intel/common/Makefile.inc
+++ b/src/soc/intel/common/Makefile.inc
@@ -1,5 +1,8 @@
 ifeq ($(CONFIG_SOC_INTEL_COMMON),y)
 
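+# Pull in the generic x86 SMM support (relocation stub and module loader)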
+subdirs-$(CONFIG_SMM_COMMON) += ../../../cpu/x86/smm
+
 verstage-$(CONFIG_SOC_INTEL_COMMON_RESET) += reset.c
 
 romstage-$(CONFIG_CACHE_MRC_SETTINGS) += mrc_cache.c
@@ -14,6 +17,10 @@ ramstage-$(CONFIG_SOC_INTEL_COMMON_RESET) += reset.c
 ramstage-y += util.c
 ramstage-$(CONFIG_MMA) += mma.c
 ramstage-$(CONFIG_SOC_INTEL_COMMON_ACPI_WAKE_SOURCE) += acpi_wake_source.c
+ramstage-$(CONFIG_SMM_COMMON) += smi.c
+ramstage-$(CONFIG_SMM_COMMON) += smmrelocate.c
+
+smm-$(CONFIG_SMM_COMMON) += smihandler.c
 
 # Create and add the MRC cache to the cbfs image
 ifneq ($(CONFIG_CHROMEOS),y)
diff --git a/src/soc/intel/common/smi.c b/src/soc/intel/common/smi.c
new file mode 100644
index 0000000..d9858f8
--- /dev/null
+++ b/src/soc/intel/common/smi.c
@@ -0,0 +1,96 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2008-2009 coresystems GmbH
+ * Copyright (C) 2014 Google Inc.
+ * Copyright (C) 2015-2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <device/device.h>
+#include <device/pci.h>
+#include <console/console.h>
+#include <arch/io.h>
+#include <cpu/cpu.h>
+#include <cpu/x86/cache.h>
+#include <cpu/x86/smm.h>
+#include <string.h>
+#include <soc/pm.h>
+#include <soc/smm.h>
+
+void southbridge_smm_clear_state(void)
+{
+	printk(BIOS_DEBUG, "Initializing Southbridge SMI...");
+
+	if (get_smi_en() & APMC_EN) {
+		printk(BIOS_INFO, "SMI# handler already enabled?\n");
+		return;
+	}
+
+	printk(BIOS_DEBUG, "Done\n");
+
+	/* Dump and clear status registers */
+	clear_smi_status();
+	clear_pm1_status();
+	clear_tco_status();
+	clear_gpe_status();
+}
+
+void southbridge_smm_enable_smi(void)
+{
+	printk(BIOS_DEBUG, "Enabling SMIs.\n");
+	/* Configure events */
+	enable_pm1(PWRBTN_EN | GBL_EN);
+	disable_gpe(PME_B0_EN);
+
+	/* Enable SMI generation */
+	enable_smi(APMC_EN | SLP_SMI_EN | GBL_SMI_EN | EOS);
+}
+
+void southbridge_trigger_smi(void)
+{
+	/* raise an SMI interrupt */
+	printk(BIOS_SPEW, "  ... raise SMI#\n");
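+	/* A write to the APM control port triggers an SMI when APMC_EN is set */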
+	outb(0x00, APM_CNT);
+}
+
+void southbridge_clear_smi_status(void)
+{
+	/* Clear SMI status */
+	clear_smi_status();
+
+	/* Clear PM1 status */
+	clear_pm1_status();
+
+	/* Set EOS bit so other SMIs can occur. */
+	enable_smi(EOS);
+}
+
+void smm_setup_structures(void *gnvs, void *tcg, void *smi1)
+{
+	/*
+	 * Issue SMI to set the GNVS pointer in SMM.
+	 * tcg and smi1 are unused.
+	 *
+	 * EAX = APM_CNT_GNVS_UPDATE
+	 * EBX = gnvs pointer
+	 * EDX = APM_CNT
+	 */
+	asm volatile (
+		"outb %%al, %%dx\n\t"
+		: /* ignore result */
+		: "a" (APM_CNT_GNVS_UPDATE),
+		  "b" ((u32)gnvs),
+		  "d" (APM_CNT)
+	);
+}
diff --git a/src/soc/intel/common/smihandler.c b/src/soc/intel/common/smihandler.c
new file mode 100644
index 0000000..6b4afe0
--- /dev/null
+++ b/src/soc/intel/common/smihandler.c
@@ -0,0 +1,409 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google Inc.
+ * Copyright (C) 2015-2016 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <arch/hlt.h>
+#include <arch/io.h>
+#include <console/console.h>
+#include <cpu/x86/cache.h>
+#include <cpu/x86/smm.h>
+#include <device/pci_def.h>
+#include <elog.h>
+#include <soc/nvs.h>
+#include <soc/pm.h>
+#include <soc/gpio.h>
+#include <soc/iomap.h>
+#include <spi-generic.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+/* GNVS needs to be set by coreboot initiating a software SMI. */
+static struct global_nvs_t *gnvs;
+static int smm_initialized;
+
+
+void southbridge_smi_set_eos(void)
+{
+	enable_smi(EOS);
+}
+
+struct global_nvs_t *smm_get_gnvs(void)
+{
+	return gnvs;
+}
+
+static void busmaster_disable_on_bus(int bus)
+{
+	int slot, func;
+	unsigned int val;
+	unsigned char hdr;
+
+	for (slot = 0; slot < 0x20; slot++) {
+		for (func = 0; func < 8; func++) {
+			u32 reg32;
+			device_t dev = PCI_DEV(bus, slot, func);
+
+			val = pci_read_config32(dev, PCI_VENDOR_ID);
+
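+			/* Skip this slot/function if no device is present */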
+			if (val == 0xffffffff || val == 0x00000000 ||
+			    val == 0x0000ffff || val == 0xffff0000)
+				continue;
+
+			/* Disable Bus Mastering for this one device */
+			reg32 = pci_read_config32(dev, PCI_COMMAND);
+			reg32 &= ~PCI_COMMAND_MASTER;
+			pci_write_config32(dev, PCI_COMMAND, reg32);
+
+			/* If this is a bridge, then follow it. */
+			hdr = pci_read_config8(dev, PCI_HEADER_TYPE);
+			hdr &= 0x7f;
+			if (hdr == PCI_HEADER_TYPE_BRIDGE ||
+			    hdr == PCI_HEADER_TYPE_CARDBUS) {
+				unsigned int buses;
+				buses = pci_read_config32(dev, PCI_PRIMARY_BUS);
+				busmaster_disable_on_bus((buses >> 8) & 0xff);
+			}
+		}
+	}
+}
+
+
+static void southbridge_smi_sleep(void)
+{
+	uint32_t reg32;
+	uint8_t slp_typ;
+
+	/* First, disable further SMIs */
+	disable_smi(SLP_SMI_EN);
+
+	/* Figure out SLP_TYP */
+	reg32 = inl(ACPI_PMIO_BASE + PM1_CNT);
+	printk(BIOS_SPEW, "SMI#: SLP = 0x%08x\n", reg32);
+	slp_typ = (reg32 >> 10) & 7;
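+	/* SLP_TYP values 5/6/7 correspond to ACPI S3/S4/S5 */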
+
+	/* Do any mainboard sleep handling */
+	mainboard_smi_sleep(slp_typ - 2);
+
+#if IS_ENABLED(CONFIG_ELOG_GSMI)
+	/* Log S3, S4, and S5 entry */
+	if (slp_typ >= 5)
+		elog_add_event_byte(ELOG_TYPE_ACPI_ENTER, slp_typ - 2);
+#endif
+	/* Clear pending GPE events */
+	clear_gpe_status();
+
+	/* Next, do the deed. */
+
+	switch (slp_typ) {
+	case SLP_TYP_S0:
+		printk(BIOS_DEBUG, "SMI#: Entering S0 (On)\n");
+		break;
+	case SLP_TYP_S3:
+		printk(BIOS_DEBUG, "SMI#: Entering S3 (Suspend-To-RAM)\n");
+
+		/* Invalidate the cache before going to S3 */
+		wbinvd();
+		break;
+	case SLP_TYP_S4:
+		printk(BIOS_DEBUG, "SMI#: Entering S4 (Suspend-To-Disk)\n");
+		break;
+	case SLP_TYP_S5:
+		printk(BIOS_DEBUG, "SMI#: Entering S5 (Soft Power off)\n");
+
+		/* Disable all GPE */
+		disable_all_gpe();
+
+		/* also iterates over all bridges on bus 0 */
+		busmaster_disable_on_bus(0);
+		break;
+	default:
+		printk(BIOS_DEBUG, "SMI#: ERROR: SLP_TYP reserved\n");
+		break;
+	}
+	/* Clear pending wake status bit to avoid immediate wake */
+
+	/* Tri-state specific GPIOs to avoid leakage during S3/S5 */
+
+	/*
+	 * Write back to the SLP register to cause the originally intended
+	 * event again. We need to set BIT13 (SLP_EN) though to make the
+	 * sleep happen.
+	 */
+	enable_pm1_control(SLP_EN);
+
+	/* Make sure to stop executing code here for S3/S4/S5 */
+	if (slp_typ > 1)
+		hlt();
+
+	/*
+	 * In most sleep states, the code flow of this function ends at
+	 * the line above. However, if we entered sleep state S1 and wake
+	 * up again, we will continue to execute code in this function.
+	 */
+	reg32 = inl(ACPI_PMIO_BASE + PM1_CNT);
+	if (reg32 & SCI_EN) {
+		/* The OS is not an ACPI OS, so we set the state to S0 */
+		disable_pm1_control(SLP_EN | SLP_TYP);
+	}
+}
+/*
+ * Look for Synchronous IO SMI and use save state from that
+ * core in case we are not running on the same core that
+ * initiated the IO transaction.
+ */
+static em64t100_smm_state_save_area_t *smi_apmc_find_state_save(uint8_t cmd)
+{
+	em64t100_smm_state_save_area_t *state;
+	int node;
+
+	/* Check all nodes looking for the one that issued the IO */
+	for (node = 0; node < CONFIG_MAX_CPUS; node++) {
+		state = smm_get_save_state(node);
+
+		/* Check for Synchronous IO (bit0==1) */
+		if (!(state->io_misc_info & (1 << 0)))
+			continue;
+
+		/* Make sure it was a write (bit4==0) */
+		if (state->io_misc_info & (1 << 4))
+			continue;
+
+		/* Check for APMC IO port */
+		if (((state->io_misc_info >> 16) & 0xff) != APM_CNT)
+			continue;
+
+		/* Check AX against the requested command */
+		if ((state->rax & 0xff) != cmd)
+			continue;
+
+		return state;
+	}
+
+	return NULL;
+}
+
+#if IS_ENABLED(CONFIG_ELOG_GSMI)
+static void southbridge_smi_gsmi(void)
+{
+	u32 *ret, *param;
+	uint8_t sub_command;
+	em64t100_smm_state_save_area_t *io_smi =
+		smi_apmc_find_state_save(ELOG_GSMI_APM_CNT);
+
+	if (!io_smi)
+		return;
+
+	/* Command and return value in EAX */
+	ret = (u32 *)&io_smi->rax;
+	sub_command = (uint8_t)(*ret >> 8);
+
+	/* Parameter buffer in EBX */
+	param = (u32 *)&io_smi->rbx;
+
+	/* drivers/elog/gsmi.c */
+	*ret = gsmi_exec(sub_command, param);
+}
+#endif
+
+static void finalize(void)
+{
+	static int finalize_done;
+
+	if (finalize_done) {
+		printk(BIOS_DEBUG, "SMM already finalized.\n");
+		return;
+	}
+	finalize_done = 1;
+
+#if IS_ENABLED(CONFIG_SPI_FLASH_SMM)
+	/* Re-init SPI driver to handle locked BAR */
+//	spi_init(); TODO
+#endif
+}
+
+static void southbridge_smi_apmc(void)
+{
+	uint8_t reg8;
+	em64t100_smm_state_save_area_t *state;
+
+	/* Emulate B2 register as the FADT / Linux expects it */
+
+	reg8 = inb(APM_CNT);
+	switch (reg8) {
+	case APM_CNT_CST_CONTROL:
+		/*
+		 * Calling this function seems to cause
+		 * some kind of race condition in Linux
+		 * and causes a kernel oops
+		 */
+		printk(BIOS_DEBUG, "C-state control\n");
+		break;
+	case APM_CNT_PST_CONTROL:
+		/*
+		 * Calling this function seems to cause
+		 * some kind of race condition in Linux
+		 * and causes a kernel oops
+		 */
+		printk(BIOS_DEBUG, "P-state control\n");
+		break;
+	case APM_CNT_ACPI_DISABLE:
+		disable_pm1_control(SCI_EN);
+		printk(BIOS_DEBUG, "SMI#: ACPI disabled.\n");
+		break;
+	case APM_CNT_ACPI_ENABLE:
+		enable_pm1_control(SCI_EN);
+		printk(BIOS_DEBUG, "SMI#: ACPI enabled.\n");
+		break;
+	case APM_CNT_GNVS_UPDATE:
+		if (smm_initialized) {
+			printk(BIOS_DEBUG,
+			       "SMI#: SMM structures already initialized!\n");
+			return;
+		}
+		state = smi_apmc_find_state_save(reg8);
+		if (state) {
+			/* EBX in the state save contains the GNVS pointer */
+			gnvs = (struct global_nvs_t *)((uint32_t)state->rbx);
+			smm_initialized = 1;
+			printk(BIOS_DEBUG, "SMI#: Setting GNVS to %p\n", gnvs);
+		}
+		break;
+#if IS_ENABLED(CONFIG_ELOG_GSMI)
+	case ELOG_GSMI_APM_CNT:
+		southbridge_smi_gsmi();
+		break;
+#endif
+	case APM_CNT_FINALIZE:
+		finalize();
+		break;
+	}
+
+	mainboard_smi_apmc(reg8);
+}
+
+static void southbridge_smi_pm1(void)
+{
+	uint16_t pm1_sts = clear_pm1_status();
+
+	/*
+	 * While OSPM is not active, poweroff immediately
+	 * on a power button event.
+	 */
+	if (pm1_sts & PWRBTN_STS) {
+		/* power button pressed */
+#if IS_ENABLED(CONFIG_ELOG_GSMI)
+		elog_add_event(ELOG_TYPE_POWER_BUTTON);
+#endif
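+		/* Clear all PM1 control bits, then force entry into S5 */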
+		disable_pm1_control(-1UL);
+		enable_pm1_control(SLP_EN | (SLP_TYP_S5 << SLP_TYP_SHIFT));
+	}
+}
+
+static void southbridge_smi_gpe0(void)
+{
+	clear_gpe_status();
+}
+
+static void southbridge_smi_tco(void)
+{
+	uint32_t tco_sts = clear_tco_status();
+
+	/* Any TCO event? */
+	if (!tco_sts)
+		return;
+
+	if (tco_sts & TCO_TIMEOUT) { /* TIMEOUT */
+		/* Handle TCO timeout */
+		printk(BIOS_DEBUG, "TCO Timeout.\n");
+	}
+}
+
+static void southbridge_smi_periodic(void)
+{
+	uint32_t reg32;
+
+	reg32 = inl(ACPI_PMIO_BASE + SMI_EN);
+
+	/* Are periodic SMIs enabled? */
+	if ((reg32 & PERIODIC_EN) == 0)
+		return;
+	printk(BIOS_DEBUG, "Periodic SMI.\n");
+}
+
+typedef void (*smi_handler_t)(void);
+
+static const smi_handler_t southbridge_smi[32] = {
+	NULL,			  /*  [0] reserved */
+	NULL,			  /*  [1] reserved */
+	NULL,			  /*  [2] BIOS_STS */
+	NULL,			  /*  [3] LEGACY_USB_STS */
+	southbridge_smi_sleep,	  /*  [4] SLP_SMI_STS */
+	southbridge_smi_apmc,	  /*  [5] APM_STS */
+	NULL,			  /*  [6] SWSMI_TMR_STS */
+	NULL,			  /*  [7] reserved */
+	southbridge_smi_pm1,	  /*  [8] PM1_STS */
+	southbridge_smi_gpe0,	  /*  [9] GPE0_STS */
+	NULL,			  /* [10] reserved */
+	NULL,			  /* [11] reserved */
+	NULL,			  /* [12] reserved */
+	southbridge_smi_tco,	  /* [13] TCO_STS */
+	southbridge_smi_periodic, /* [14] PERIODIC_STS */
+	NULL,			  /* [15] SERIRQ_SMI_STS */
+	NULL,			  /* [16] SMBUS_SMI_STS */
+	NULL,			  /* [17] LEGACY_USB2_STS */
+	NULL,			  /* [18] INTEL_USB2_STS */
+	NULL,			  /* [19] reserved */
+	NULL,			  /* [20] PCI_EXP_SMI_STS */
+	NULL,			  /* [21] reserved */
+	NULL,			  /* [22] reserved */
+	NULL,			  /* [23] reserved */
+	NULL,			  /* [24] reserved */
+	NULL,			  /* [25] reserved */
+	NULL,			  /* [26] SPI_STS */
+	NULL,			  /* [27] reserved */
+	NULL,			  /* [28] PUNIT */
+	NULL,			  /* [29] GUNIT */
+	NULL,			  /* [30] reserved */
+	NULL			  /* [31] reserved */
+};
+
+void southbridge_smi_handler(void)
+{
+	int i;
+	uint32_t smi_sts;
+
+	/*
+	 * We need to clear the SMI status registers, or we won't see what's
+	 * happening in the following calls.
+	 */
+	smi_sts = clear_smi_status();
+
+	/* Call SMI sub handler for each of the status bits */
+	for (i = 0; i < ARRAY_SIZE(southbridge_smi); i++) {
+		if (!(smi_sts & (1 << i)))
+			continue;
+
+		if (southbridge_smi[i] != NULL) {
+			southbridge_smi[i]();
+		} else {
+			printk(BIOS_DEBUG,
+			       "SMI_STS[%d] occurred, but no "
+			       "handler available.\n", i);
+		}
+	}
+}
diff --git a/src/soc/intel/common/smmrelocate.c b/src/soc/intel/common/smmrelocate.c
new file mode 100644
index 0000000..83eece1
--- /dev/null
+++ b/src/soc/intel/common/smmrelocate.c
@@ -0,0 +1,439 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc.
+ * Copyright (C) 2015-2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <types.h>
+#include <string.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <cpu/cpu.h>
+#include <cpu/x86/cache.h>
+#include <cpu/x86/lapic.h>
+#include <cpu/x86/mp.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
+#include <console/console.h>
+#include <soc/cpu.h>
+#include <soc/smm.h>
+
+/* This gets filled in and used during relocation. */
+static struct smm_relocation_params smm_reloc_params;
+
+static inline void write_smrr(struct smm_relocation_params *relo_params)
+{
+	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
+	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
+}
+
+static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
+{
+	printk(BIOS_DEBUG,
+	       "Writing UNCORE_EMRR. base = 0x%08x, mask=0x%08x\n",
+	       relo_params->uncore_emrr_base.lo,
+	       relo_params->uncore_emrr_mask.lo);
+	wrmsr(MSR_UNCORE_PRMRR_PHYS_BASE, relo_params->uncore_emrr_base);
+	wrmsr(MSR_UNCORE_PRMRR_PHYS_MASK, relo_params->uncore_emrr_mask);
+}
+
+static void update_save_state(int cpu,
+			      struct smm_relocation_params *relo_params,
+			      const struct smm_runtime *runtime)
+{
+	u32 smbase;
+	u32 iedbase;
+
+	/*
+	 * The relocated handler runs with all CPUs concurrently. Therefore
+	 * stagger the entry points adjusting SMBASE downwards by save state
+	 * size * CPU num.
+	 */
+	smbase = relo_params->smram_base - cpu * runtime->save_state_size;
+	iedbase = relo_params->ied_base;
+
+	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
+	       smbase, iedbase);
+
+	/*
+	 * All threads need to set IEDBASE and SMBASE to the relocated
+	 * handler region. However, the save state location depends on the
+	 * smm_save_state_in_msrs field in the relocation parameters. If
+	 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
+	 * the SMM handler in parallel, and each CPU's save state area is
+	 * located in its respective MSR space. If smm_save_state_in_msrs
+	 * is zero then the SMM relocation is happening serially so the
+	 * save state is at the same default location for all CPUs.
+	 */
+	if (relo_params->smm_save_state_in_msrs) {
+		msr_t smbase_msr;
+		msr_t iedbase_msr;
+
+		smbase_msr.lo = smbase;
+		smbase_msr.hi = 0;
+
+		/*
+		 * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
+		 * not clear why it differs from the SMBASE MSR.
+		 */
+		iedbase_msr.lo = 0;
+		iedbase_msr.hi = iedbase;
+
+		wrmsr(MSR_SMBASE, smbase_msr);
+		wrmsr(MSR_IEDBASE, iedbase_msr);
+	} else {
+		em64t101_smm_state_save_area_t *save_state;
+
+		save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE -
+				      runtime->save_state_size);
+
+		save_state->smbase = smbase;
+		save_state->iedbase = iedbase;
+	}
+}
+
+/* Returns 1 if SMM MSR save state was set. */
+static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
+{
+	msr_t smm_mca_cap;
+
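+	/* SMM_CPU_SVRSTR indicates support for saving SMM state in MSRs */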
+	smm_mca_cap = rdmsr(MSR_SMM_MCA_CAP);
+	if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
+		msr_t smm_feature_control;
+
+		smm_feature_control = rdmsr(MSR_SMM_FEATURE_CONTROL);
+		smm_feature_control.hi = 0;
+		smm_feature_control.lo |= SMM_CPU_SAVE_EN;
+		wrmsr(MSR_SMM_FEATURE_CONTROL, smm_feature_control);
+		relo_params->smm_save_state_in_msrs = 1;
+	}
+	return relo_params->smm_save_state_in_msrs;
+}
+
+/*
+ * The relocation work is actually performed in SMM context, but the code
+ * resides in the ramstage module. This occurs by trampolining from the default
+ * SMRAM entry point to here.
+ */
+static void asmlinkage cpu_smm_do_relocation(void *arg)
+{
+	msr_t mtrr_cap;
+	struct smm_relocation_params *relo_params;
+	const struct smm_module_params *p;
+	const struct smm_runtime *runtime;
+	int cpu;
+
+	p = arg;
+	runtime = p->runtime;
+	relo_params = p->arg;
+	cpu = p->cpu;
+
+	if (cpu >= CONFIG_MAX_CPUS) {
+		printk(BIOS_CRIT,
+		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+		return;
+	}
+
+	printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
+
+	/*
+	 * Determine if the processor supports saving state in MSRs. If so,
+	 * enable it before the non-BSPs run so that SMM relocation can occur
+	 * in parallel in the non-BSP CPUs.
+	 */
+	if (cpu == 0) {
+		/*
+		 * If smm_save_state_in_msrs is 1 then that means this is the
+		 * 2nd time through the relocation handler for the BSP.
+		 * Parallel SMM handler relocation is taking place. However,
+		 * it is desired to access other CPUs save state in the real
+		 * SMM handler. Therefore, disable the SMM save state in MSRs
+		 * feature.
+		 */
+		if (relo_params->smm_save_state_in_msrs) {
+			msr_t smm_feature_control;
+
+			smm_feature_control = rdmsr(MSR_SMM_FEATURE_CONTROL);
+			smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
+			wrmsr(MSR_SMM_FEATURE_CONTROL, smm_feature_control);
+		} else if (bsp_setup_msr_save_state(relo_params))
+			/*
+			 * Just return from relocation handler if MSR save
+			 * state is enabled. In that case the BSP will come
+			 * back into the relocation handler to set up the new
+			 * SMBASE as well as disable SMM save state in MSRs.
+			 */
+			return;
+	}
+
+	/* Make appropriate changes to the save state map. */
+	update_save_state(cpu, relo_params, runtime);
+
+	/* Write EMRR and SMRR MSRs based on indicated support. */
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
+	if (mtrr_cap.lo & SMRR_SUPPORTED)
+		write_smrr(relo_params);
+}
+
+static void fill_in_relocation_params(struct smm_relocation_params *params)
+{
+	void *handler_base;
+	size_t handler_size;
+	void *ied_base;
+	size_t ied_size;
+	void *tseg_base;
+	size_t tseg_size;
+	u32 emrr_base;
+	u32 emrr_size;
+	int phys_bits;
+	/* All range registers are aligned to 4KiB */
+	const u32 rmask = ~((1 << 12) - 1);
+
+	/*
+	 * Some of the range registers are dependent on the number of physical
+	 * address bits supported.
+	 */
+	phys_bits = cpuid_eax(0x80000008) & 0xff;
+
+	smm_region(&tseg_base, &tseg_size);
+	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);
+	smm_subregion(SMM_SUBREGION_CHIPSET, &ied_base, &ied_size);
+
+	params->smram_size = handler_size;
+	params->smram_base = (uintptr_t)handler_base;
+
+	params->ied_base = (uintptr_t)ied_base;
+	params->ied_size = ied_size;
+
+	/* SMRR has 32-bits of valid address aligned to 4KiB. */
+	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
+	params->smrr_base.hi = 0;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.hi = 0;
+
+	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
+	emrr_base = (params->ied_base + (2 << 20)) & rmask;
+	emrr_size = params->ied_size - (2 << 20);
+
+	/*
+	 * EMRR has 46 bits of valid address aligned to 4KiB. It's dependent
+	 * on the number of physical address bits supported.
+	 */
+	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
+	params->emrr_base.hi = 0;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
+
+	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
+	params->uncore_emrr_base.lo = emrr_base;
+	params->uncore_emrr_base.hi = 0;
+	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
+					MTRR_PHYS_MASK_VALID;
+	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
+}
+
+static void adjust_apic_id_map(struct smm_loader_params *smm_params)
+{
+	struct smm_runtime *runtime;
+	int i;
+
+	/* Adjust the APIC id map if HT is disabled. */
+	if (!ht_disabled)
+		return;
+
+	runtime = smm_params->runtime;
+
+	/* The APIC ids increment by 2 when HT is disabled. */
+	for (i = 0; i < CONFIG_MAX_CPUS; i++)
+		runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2;
+}
+
+static int install_relocation_handler(int num_cpus,
+				      struct smm_relocation_params *relo_params)
+{
+	/*
+	 * The default SMM entry can happen in parallel or serially. If the
+	 * default SMM entry is done in parallel, the BSP has already set up
+	 * the save state in each CPU's MSRs. At least one save state size
+	 * is required for the initial SMM entry for the BSP to determine if
+	 * parallel SMM relocation is even feasible.  Set the stack size to
+	 * the save state size, and call into the do_relocation handler.
+	 */
+	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
+	struct smm_loader_params smm_params = {
+		.per_cpu_stack_size = save_state_size,
+		.num_concurrent_stacks = num_cpus,
+		.per_cpu_save_state_size = save_state_size,
+		.num_concurrent_save_states = 1,
+		.handler = (smm_handler_t)&cpu_smm_do_relocation,
+		.handler_arg = (void *)relo_params,
+	};
+
+	if (smm_setup_relocation_handler(&smm_params))
+		return -1;
+
+	adjust_apic_id_map(&smm_params);
+
+	return 0;
+}
+
+static void setup_ied_area(struct smm_relocation_params *params)
+{
+	char *ied_base;
+
+	struct ied_header ied = {
+		.signature = "INTEL RSVD",
+		.size = params->ied_size,
+		.reserved = {0},
+	};
+
+	ied_base = (void *)params->ied_base;
+
+	printk(BIOS_DEBUG, "IED base = 0x%08x\n", params->ied_base);
+	printk(BIOS_DEBUG, "IED size = 0x%08x\n", params->ied_size);
+
+	/* Place IED header at IEDBASE. */
+	memcpy(ied_base, &ied, sizeof(ied));
+
+	/* Zero out 32KiB at IEDBASE + 1MiB */
+	memset(ied_base + (1 << 20), 0, (32 << 10));
+}
+
+static int install_permanent_handler(int num_cpus,
+				     struct smm_relocation_params *relo_params)
+{
+	/*
+	 * There are num_cpus concurrent stacks and num_cpus concurrent save
+	 * state areas. Lastly, set the stack size to the save state size.
+	 */
+	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
+	struct smm_loader_params smm_params = {
+		.per_cpu_stack_size = save_state_size,
+		.num_concurrent_stacks = num_cpus,
+		.per_cpu_save_state_size = save_state_size,
+		.num_concurrent_save_states = num_cpus,
+	};
+
+	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
+	       relo_params->smram_base);
+	if (smm_load_module((void *)relo_params->smram_base,
+	    relo_params->smram_size, &smm_params))
+		return -1;
+
+	adjust_apic_id_map(&smm_params);
+
+	return 0;
+}
+
+static int cpu_smm_setup(void)
+{
+	int num_cpus;
+	msr_t msr;
+
+	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
+
+	fill_in_relocation_params(&smm_reloc_params);
+
+	if (smm_reloc_params.ied_size)
+		setup_ied_area(&smm_reloc_params);
+
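+	/* MSR_CORE_THREAD_COUNT reports the thread count in its low 16 bits */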
+	msr = rdmsr(MSR_CORE_THREAD_COUNT);
+	num_cpus = msr.lo & 0xffff;
+	if (num_cpus > CONFIG_MAX_CPUS) {
+		printk(BIOS_CRIT,
+			"Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n",
+			num_cpus, CONFIG_MAX_CPUS);
+	}
+
+	if (install_relocation_handler(num_cpus, &smm_reloc_params)) {
+		printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
+		return -1;
+	}
+
+	if (install_permanent_handler(num_cpus, &smm_reloc_params)) {
+		printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
+		return -1;
+	}
+
+	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
+	wbinvd();
+
+	return 0;
+}
+
+int smm_initialize(void)
+{
+	/* Return early if CPU SMM setup failed. */
+	if (cpu_smm_setup())
+		return -1;
+
+	/* Clear the SMM state in the southbridge. */
+	southbridge_smm_clear_state();
+
+	/* Run the relocation handler. */
+	smm_initiate_relocation();
+
+	if (smm_reloc_params.smm_save_state_in_msrs)
+		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
+
+	return 0;
+}
+
+void smm_relocate(void)
+{
+	/*
+	 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
+	 * shall take place. Run the relocation handler a second time on the
+	 * BSP to do the final move. For APs, a relocation handler always
+	 * needs to be run.
+	 */
+	if (smm_reloc_params.smm_save_state_in_msrs)
+		smm_initiate_relocation_parallel();
+	else if (!boot_cpu())
+		smm_initiate_relocation();
+}
+
+void smm_init(void)
+{
+	/*
+	 * smm_init() is normally called from initialize_cpus() in
+	 * lapic_cpu_init.c. However, that path is no longer used. Don't reuse
+	 * the function name because that would cause confusion.
+	 * The smm_initialize() function above is used to setup SMM at the
+	 * appropriate time.
+	 */
+}
+
+void smm_lock(void)
+{
+	/*
+	 * LOCK the SMM memory window and enable normal SMM.
+	 * After running this function, only a full reset can
+	 * make the SMM registers writable again.
+	 */
+	printk(BIOS_DEBUG, "Locking SMM.\n");
+#if ENV_RAMSTAGE
+	write32((void *)(CONFIG_MMCONF_BASE_ADDRESS + SMRAM),
+		D_LCK | G_SMRAME | C_BASE_SEG);
+#else
+	pci_write_config8(PCI_DEV(0, 0, 0), SMRAM, D_LCK | G_SMRAME
+		| C_BASE_SEG);
+#endif
+}


