[coreboot-gerrit] Change in coreboot[master]: soc/intel/common/block: [WIP]Add Intel common CPU code

Barnali Sarkar (Code Review) gerrit at coreboot.org
Mon May 22 18:01:54 CEST 2017


Barnali Sarkar has uploaded a new change for review. ( https://review.coreboot.org/19540 )

Change subject: soc/intel/common/block: [WIP]Add Intel common CPU code
......................................................................

soc/intel/common/block: [WIP]Add Intel common CPU code

Create Intel Common CPU programming code. This change does the
following:

* Cache the BIOS region
* Set FLEX_RATIO to TDP Nominal
* Set Maximum Frequency

Currently, the code covers only the bootblock stage.
More code will be added in subsequent phases.
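
For reference, here is a minimal sketch of how an SoC bootblock might wire
these helpers together. The wrapper name bootblock_soc_cpu_init() is purely
illustrative and not part of this change; the three helpers are the ones
declared in intelblocks/cpu.h below.

  #include <intelblocks/cpu.h>

  static void bootblock_soc_cpu_init(void)
  {
          /* Cache the memory-mapped IFD BIOS region with a WRPROT MTRR */
          cache_bios_region();

          /* Set FLEX_RATIO to the TDP-nominal ratio; this may issue a
             CPU-only soft reset if the PCH strap has to be updated */
          set_flex_ratio_to_tdp_nominal();

          /* Run the rest of the boot flow at the max non-turbo frequency */
          set_P_State_to_max_non_turbo_ratio();
  }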

Change-Id: I2f80c42132d9ea738be4051d2395e9e51ac153f8
Signed-off-by: Barnali Sarkar <barnali.sarkar at intel.com>
---
M src/soc/intel/common/block/cpu/Kconfig
M src/soc/intel/common/block/cpu/Makefile.inc
A src/soc/intel/common/block/cpu/cpu.c
A src/soc/intel/common/block/cpu/cpu_early.c
A src/soc/intel/common/block/cpu/smmrelocate.c
A src/soc/intel/common/block/include/intelblocks/cpu.h
6 files changed, 1,079 insertions(+), 0 deletions(-)


  git pull ssh://review.coreboot.org:29418/coreboot refs/changes/40/19540/4

diff --git a/src/soc/intel/common/block/cpu/Kconfig b/src/soc/intel/common/block/cpu/Kconfig
index 7b78c53..955de87 100644
--- a/src/soc/intel/common/block/cpu/Kconfig
+++ b/src/soc/intel/common/block/cpu/Kconfig
@@ -1,3 +1,8 @@
+config SOC_INTEL_COMMON_BLOCK_CPU
+	bool
+	help
+	  Intel Common CPU Model Support.
+
 config SOC_INTEL_COMMON_BLOCK_CAR
 	bool
 	default n
diff --git a/src/soc/intel/common/block/cpu/Makefile.inc b/src/soc/intel/common/block/cpu/Makefile.inc
index abdff2f..1510e2c 100644
--- a/src/soc/intel/common/block/cpu/Makefile.inc
+++ b/src/soc/intel/common/block/cpu/Makefile.inc
@@ -1,3 +1,11 @@
 bootblock-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CAR) += car/cache_as_ram.S
+bootblock-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU) += cpu_early.c
+
 postcar-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CAR) += car/exit_car.S
+
 romstage-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CAR) += car/exit_car.S
+romstage-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU) += cpu_early.c
+
+ramstage-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU) += cpu_early.c
+ramstage-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU) += cpu.c
+ramstage-$(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU) += smmrelocate.c
diff --git a/src/soc/intel/common/block/cpu/cpu.c b/src/soc/intel/common/block/cpu/cpu.c
new file mode 100644
index 0000000..45987cf
--- /dev/null
+++ b/src/soc/intel/common/block/cpu/cpu.c
@@ -0,0 +1,548 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2007-2009 coresystems GmbH
+ * Copyright (C) 2014 Google Inc.
+ * Copyright (C) 2015-2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <assert.h>
+#include <bootstate.h>
+#include <console/console.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <string.h>
+#include <chip.h>
+#include <cpu/cpu.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/lapic.h>
+#include <cpu/x86/mp.h>
+#include <cpu/intel/microcode.h>
+#include <cpu/intel/speedstep.h>
+#include <cpu/intel/turbo.h>
+#include <cpu/x86/cache.h>
+#include <cpu/x86/name.h>
+#include <cpu/x86/smm.h>
+#include <delay.h>
+#include <intelblocks/cpu.h>
+#include <pc80/mc146818rtc.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+#include <soc/ramstage.h>
+#include <soc/smm.h>
+#include <soc/systemagent.h>
+
+/* MP initialization support. */
+static const void *microcode_patch;
+static int ht_disabled;
+
+/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
+static const u8 power_limit_time_sec_to_msr[] = {
+	[0]   = 0x00,
+	[1]   = 0x0a,
+	[2]   = 0x0b,
+	[3]   = 0x4b,
+	[4]   = 0x0c,
+	[5]   = 0x2c,
+	[6]   = 0x4c,
+	[7]   = 0x6c,
+	[8]   = 0x0d,
+	[10]  = 0x2d,
+	[12]  = 0x4d,
+	[14]  = 0x6d,
+	[16]  = 0x0e,
+	[20]  = 0x2e,
+	[24]  = 0x4e,
+	[28]  = 0x6e,
+	[32]  = 0x0f,
+	[40]  = 0x2f,
+	[48]  = 0x4f,
+	[56]  = 0x6f,
+	[64]  = 0x10,
+	[80]  = 0x30,
+	[96]  = 0x50,
+	[112] = 0x70,
+	[128] = 0x11,
+};
+
+/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
+static const u8 power_limit_time_msr_to_sec[] = {
+	[0x00] = 0,
+	[0x0a] = 1,
+	[0x0b] = 2,
+	[0x4b] = 3,
+	[0x0c] = 4,
+	[0x2c] = 5,
+	[0x4c] = 6,
+	[0x6c] = 7,
+	[0x0d] = 8,
+	[0x2d] = 10,
+	[0x4d] = 12,
+	[0x6d] = 14,
+	[0x0e] = 16,
+	[0x2e] = 20,
+	[0x4e] = 24,
+	[0x6e] = 28,
+	[0x0f] = 32,
+	[0x2f] = 40,
+	[0x4f] = 48,
+	[0x6f] = 56,
+	[0x10] = 64,
+	[0x30] = 80,
+	[0x50] = 96,
+	[0x70] = 112,
+	[0x11] = 128,
+};
+
+/*
+ * Configure processor power limits if possible
+ * This must be done AFTER BIOS_RESET_CPL is set.
+ */
+void set_power_limits(u8 power_limit_1_time)
+{
+	msr_t msr = rdmsr(MSR_PLATFORM_INFO);
+	msr_t limit;
+	unsigned int power_unit;
+	unsigned int tdp, min_power, max_power, max_time, tdp_pl2;
+	u8 power_limit_1_val;
+	device_t dev = SA_DEV_ROOT;
+	config_t *conf = dev->chip_info;
+
+	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
+		power_limit_1_time = 28;
+
+	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
+		return;
+
+	/* Get units */
+	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
+	power_unit = 1 << (msr.lo & 0xf);
+
+	/* Get power defaults for this SKU */
+	msr = rdmsr(MSR_PKG_POWER_SKU);
+	tdp = msr.lo & 0x7fff;
+	min_power = (msr.lo >> 16) & 0x7fff;
+	max_power = msr.hi & 0x7fff;
+	max_time = (msr.hi >> 16) & 0x7f;
+
+	printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);
+
+	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
+		power_limit_1_time = power_limit_time_msr_to_sec[max_time];
+
+	if (min_power > 0 && tdp < min_power)
+		tdp = min_power;
+
+	if (max_power > 0 && tdp > max_power)
+		tdp = max_power;
+
+	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];
+
+	/* Set long term power limit to TDP */
+	limit.lo = 0;
+	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
+
+	/* Set PL1 Pkg Power clamp bit */
+	limit.lo |= PKG_POWER_LIMIT_CLAMP;
+
+	limit.lo |= PKG_POWER_LIMIT_EN;
+	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
+		PKG_POWER_LIMIT_TIME_SHIFT;
+
+	/* Set short term power limit to 1.25 * TDP */
+	limit.hi = 0;
+	tdp_pl2 = (conf->tdp_pl2_override == 0) ?
+		(tdp * 125) / 100 : (conf->tdp_pl2_override * power_unit);
+	limit.hi |= (tdp_pl2) & PKG_POWER_LIMIT_MASK;
+	limit.hi |= PKG_POWER_LIMIT_CLAMP;
+	limit.hi |= PKG_POWER_LIMIT_EN;
+
+	/* Power limit 2 time is only programmable on server SKU */
+	wrmsr(MSR_PKG_POWER_LIMIT, limit);
+
+	/* Set PL2 power limit values in MCHBAR and disable PL1 */
+	MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo & (~(PKG_POWER_LIMIT_EN));
+	MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;
+
+	/* Set DDR RAPL power limit by copying from MMIO to MSR */
+	msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
+	msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
+	wrmsr(MSR_DDR_RAPL_LIMIT, msr);
+
+	/* Use nominal TDP values for CPUs with configurable TDP */
+	if (cpu_config_tdp_levels()) {
+		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+		limit.hi = 0;
+		limit.lo = msr.lo & 0xff;
+		wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
+	}
+}
+
+static void configure_thermal_target(void)
+{
+	device_t dev = SA_DEV_ROOT;
+	config_t *conf = dev->chip_info;
+	msr_t msr;
+
+	/* Set TCC activation offset if supported */
+	msr = rdmsr(MSR_PLATFORM_INFO);
+	if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
+		msr = rdmsr(MSR_TEMPERATURE_TARGET);
+		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
+		msr.lo |= (conf->tcc_offset & 0xf) << 24;
+		wrmsr(MSR_TEMPERATURE_TARGET, msr);
+	}
+	msr = rdmsr(MSR_TEMPERATURE_TARGET);
+	msr.lo &= ~0x7f; /* Bits 6:0 */
+	msr.lo |= 0xe6; /* setting 100ms thermal time window */
+	wrmsr(MSR_TEMPERATURE_TARGET, msr);
+}
+
+static void configure_isst(void)
+{
+	device_t dev = SA_DEV_ROOT;
+	config_t *conf = dev->chip_info;
+	msr_t msr;
+
+	if (conf->speed_shift_enable) {
+		/*
+		 * The kernel driver checks CPUID.06h:EAX[Bit 7] to determine
+		 * if HWP is supported or not. Coreboot needs to configure
+		 * MSR 0x1AA, which is then reflected in the CPUID register.
+		 */
+		msr = rdmsr(MSR_MISC_PWR_MGMT);
+		msr.lo |= MISC_PWR_MGMT_ISST_EN; /* Enable Speed Shift */
+		msr.lo |= MISC_PWR_MGMT_ISST_EN_INT; /* Enable Interrupt */
+		msr.lo |= MISC_PWR_MGMT_ISST_EN_EPP; /* Enable EPP */
+		wrmsr(MSR_MISC_PWR_MGMT, msr);
+	} else {
+		msr = rdmsr(MSR_MISC_PWR_MGMT);
+		msr.lo &= ~MISC_PWR_MGMT_ISST_EN; /* Disable Speed Shift */
+		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_INT; /* Disable Interrupt */
+		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_EPP; /* Disable EPP */
+		wrmsr(MSR_MISC_PWR_MGMT, msr);
+	}
+}
+
+static void configure_misc(void)
+{
+	msr_t msr;
+
+	msr = rdmsr(IA32_MISC_ENABLE);
+	msr.lo |= (1 << 0);	/* Fast String enable */
+	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
+	msr.lo |= (1 << 16);	/* Enhanced SpeedStep Enable */
+	wrmsr(IA32_MISC_ENABLE, msr);
+
+	/* Disable Thermal interrupts */
+	msr.lo = 0;
+	msr.hi = 0;
+	wrmsr(IA32_THERM_INTERRUPT, msr);
+
+	/* Enable package critical interrupt only */
+	msr.lo = 1 << 4;
+	msr.hi = 0;
+	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
+
+	/* Enable PROCHOT */
+	msr = rdmsr(MSR_POWER_CTL);
+	msr.lo |= (1 << 0);	/* Enable Bi-directional PROCHOT as an input */
+	msr.lo |= (1 << 23);	/* Lock it */
+	wrmsr(MSR_POWER_CTL, msr);
+}
+
+static void enable_lapic_tpr(void)
+{
+	msr_t msr;
+
+	msr = rdmsr(MSR_PIC_MSG_CONTROL);
+	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
+	wrmsr(MSR_PIC_MSG_CONTROL, msr);
+}
+
+static void configure_dca_cap(void)
+{
+	struct cpuid_result cpuid_regs;
+	msr_t msr;
+
+	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
+	cpuid_regs = cpuid(1);
+	if (cpuid_regs.ecx & (1 << 18)) {
+		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
+		msr.lo |= 1;
+		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
+	}
+}
+
+static void set_max_ratio(void)
+{
+	msr_t msr, perf_ctl;
+
+	perf_ctl.hi = 0;
+
+	/* Check for configurable TDP option */
+	if (get_turbo_state() == TURBO_ENABLED) {
+		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
+		perf_ctl.lo = (msr.lo & 0xff) << 8;
+	} else if (cpu_config_tdp_levels()) {
+		/* Set to nominal TDP ratio */
+		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+		perf_ctl.lo = (msr.lo & 0xff) << 8;
+	} else {
+		/* Platform Info bits 15:8 give max ratio */
+		msr = rdmsr(MSR_PLATFORM_INFO);
+		perf_ctl.lo = msr.lo & 0xff00;
+	}
+	wrmsr(IA32_PERF_CTL, perf_ctl);
+
+	printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
+	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
+}
+
+static void set_energy_perf_bias(u8 policy)
+{
+	msr_t msr;
+	int ecx;
+
+	/* Determine if energy efficient policy is supported. */
+	ecx = cpuid_ecx(0x6);
+	if (!(ecx & (1 << 3)))
+		return;
+
+	/* Energy Policy is bits 3:0 */
+	msr = rdmsr(IA32_ENERGY_PERFORMANCE_BIAS);
+	msr.lo &= ~0xf;
+	msr.lo |= policy & 0xf;
+	wrmsr(IA32_ENERGY_PERFORMANCE_BIAS, msr);
+
+	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", policy);
+}
+
+static void configure_mca(void)
+{
+	msr_t msr;
+	int i;
+	int num_banks;
+
+	msr = rdmsr(IA32_MCG_CAP);
+	num_banks = msr.lo & 0xff;
+	msr.lo = msr.hi = 0;
+	/*
+	 * TODO(adurbin): This should only be done on a cold boot. Also, some
+	 * of these banks are core vs package scope. For now every CPU clears
+	 * every bank.
+	 */
+	for (i = 0; i < num_banks; i++) {
+		/* Clear the machine check status */
+		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
+		/* Initialize machine checks */
+		wrmsr(IA32_MC0_CTL + i * 4,
+			(msr_t) {.lo = 0xffffffff, .hi = 0xffffffff});
+	}
+}
+
+/* All CPUs including BSP will run the following function. */
+static void cpu_core_init(device_t cpu)
+{
+	/* Clear out pending MCEs */
+	configure_mca();
+
+	/* Enable the local CPU apics */
+	enable_lapic_tpr();
+	setup_lapic();
+
+	/* Configure Enhanced SpeedStep and Thermal Sensors */
+	configure_misc();
+
+	/* Configure Intel Speed Shift */
+	configure_isst();
+
+	/* Enable Direct Cache Access */
+	configure_dca_cap();
+
+	/* Set energy policy */
+	set_energy_perf_bias(ENERGY_POLICY_NORMAL);
+
+	/* Enable Turbo */
+	enable_turbo();
+
+	/* Configure SGX */
+	configure_sgx(microcode_patch);
+}
+
+static struct device_operations cpu_dev_ops = {
+	.init = cpu_core_init,
+};
+
+static struct cpu_device_id cpu_table[] = {
+	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_C0 },
+	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_D0 },
+	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HQ0 },
+	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HR0 },
+	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_G0 },
+	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_H0 },
+	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_Y0 },
+	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HA0 },
+	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HB0 },
+	{ 0, 0 },
+};
+
+static const struct cpu_driver driver __cpu_driver = {
+	.ops      = &cpu_dev_ops,
+	.id_table = cpu_table,
+};
+
+static int get_cpu_count(void)
+{
+	msr_t msr;
+	int num_threads;
+	int num_cores;
+
+	msr = rdmsr(MSR_CORE_THREAD_COUNT);
+	num_threads = (msr.lo >> 0) & 0xffff;
+	num_cores = (msr.lo >> 16) & 0xffff;
+	printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
+	       num_cores, num_threads);
+
+	ht_disabled = num_threads == num_cores;
+
+	return num_threads;
+}
+
+static void get_microcode_info(const void **microcode, int *parallel)
+{
+	microcode_patch = intel_microcode_find();
+	*microcode = microcode_patch;
+	*parallel = 1;
+}
+
+static int adjust_apic_id(int index, int apic_id)
+{
+	if (ht_disabled)
+		return 2 * index;
+	else
+		return index;
+}
+
+/* Check whether the current CPU is the sibling hyperthread. */
+int is_secondary_thread(void)
+{
+	int apic_id;
+	apic_id = lapicid();
+
+	if (!ht_disabled && (apic_id & 1))
+		return 1;
+	return 0;
+}
+
+static void per_cpu_smm_trigger(void)
+{
+	/* Relocate the SMM handler. */
+	smm_relocate();
+
+	/* After SMM relocation a 2nd microcode load is required. */
+	intel_microcode_load_unlocked(microcode_patch);
+}
+
+static void post_mp_init(void)
+{
+	/* Set Max Ratio */
+	set_max_ratio();
+
+	/*
+	 * Now that all APs, as well as the BSP, have been relocated, let
+	 * SMIs start flowing.
+	 */
+	southbridge_smm_enable_smi();
+
+	/* Lock down the SMRAM space. */
+#if IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)
+	smm_lock();
+#endif
+}
+
+static const struct mp_ops mp_ops = {
+	/*
+	 * Skip pre-MP-init MTRR programming since the MTRRs set prior to
+	 * ramstage are mirrored from the BSP.
+	 * The real MTRR programming is done after resource allocation.
+	 */
+	.pre_mp_init = soc_fsp_load,
+	.get_cpu_count = get_cpu_count,
+	.get_smm_info = smm_info,
+	.get_microcode_info = get_microcode_info,
+	.adjust_cpu_apic_entry = adjust_apic_id,
+	.pre_mp_smm_init = smm_initialize,
+	.per_cpu_smm_trigger = per_cpu_smm_trigger,
+	.relocation_handler = smm_relocation_handler,
+	.post_mp_init = post_mp_init,
+};
+
+static void soc_init_cpus(void *unused)
+{
+	device_t dev = dev_find_path(NULL, DEVICE_PATH_CPU_CLUSTER);
+	assert(dev != NULL);
+	struct bus *cpu_bus = dev->link_list;
+
+	if (mp_init_with_smm(cpu_bus, &mp_ops))
+		printk(BIOS_ERR, "MP initialization failure.\n");
+
+	/* Thermal throttle activation offset */
+	configure_thermal_target();
+
+	/*
+	 * TODO: somehow calling configure_sgx() in cpu_core_init() is not
+	 * successful on the BSP (other threads are fine). It has to be run
+	 * again here to get SGX enabled on the BSP. This behavior needs to
+	 * be root-caused so that this redundant call can be removed.
+	 */
+	configure_sgx(microcode_patch);
+}
+
+/* Re-program all MTRRs based on the DRAM resource settings */
+static void soc_post_cpus_init(void *unused)
+{
+	if (mp_run_on_all_cpus(&x86_setup_mtrrs_with_detect, 1000) < 0)
+		printk(BIOS_ERR, "MTRR programming failure\n");
+}
+
+int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
+{
+	msr_t msr1;
+	msr_t msr2;
+
+	/*
+	 * If PRMRR/SGX is supported, the FIT microcode load will set MSR
+	 * 0x08b to a patch revision ID one less than the ID in the
+	 * microcode binary. PRMRR support is indicated in MSR
+	 * MTRRCAP[12]. If SGX is not enabled, check for this and avoid
+	 * reloading the same microcode during CPU initialization. If SGX is
+	 * enabled, the same microcode needs to be reloaded after the core
+	 * PRMRR MSRs are programmed, as part of the SGX BIOS initialization.
+	 */
+	msr1 = rdmsr(MTRR_CAP_MSR);
+	msr2 = rdmsr(PRMRR_PHYS_BASE_MSR);
+	if (msr2.lo && (current_patch_id == new_patch_id - 1))
+		return 0;
+	else
+		return (msr1.lo & PRMRR_SUPPORTED) &&
+			(current_patch_id == new_patch_id - 1);
+}
+
+/*
+ * Do CPU MP Init before FSP Silicon Init
+ */
+BOOT_STATE_INIT_ENTRY(BS_DEV_INIT_CHIPS, BS_ON_ENTRY, soc_init_cpus, NULL);
+BOOT_STATE_INIT_ENTRY(BS_DEV_INIT, BS_ON_EXIT, soc_post_cpus_init, NULL);
+
diff --git a/src/soc/intel/common/block/cpu/cpu_early.c b/src/soc/intel/common/block/cpu/cpu_early.c
new file mode 100644
index 0000000..11138f0
--- /dev/null
+++ b/src/soc/intel/common/block/cpu/cpu_early.c
@@ -0,0 +1,172 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc.
+ * Copyright (C) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <arch/io.h>
+#include <console/console.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <delay.h>
+#include <intelblocks/cpu.h>
+#include <intelblocks/fast_spi.h>
+#include <lib.h>
+#include <reset.h>
+#include <soc/bootblock.h>
+#include <soc/cpu.h>
+#include <soc/iomap.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+#include <stdint.h>
+
+/* Soft Reset Data Register Bit 12 = MAX Boot Frequency */
+#define STRAP_MAX_FREQ	(1<<12)
+/* Soft Reset Data Register Bits 6-11 = Flex Ratio */
+#define FLEX_RATIO_BIT	6
+
+static void set_pch_cpu_strap(u8 flex_ratio)
+{
+	u32 soft_reset_data;
+
+	/* Soft Reset Data Register Bit 12 = MAX Boot Frequency,
+	 * Bits 6-11 = Flex Ratio.
+	 * The Soft Reset Data register is at SPIBAR0 offset 0xF8[0:15].
+	 */
+	soft_reset_data = STRAP_MAX_FREQ;
+	soft_reset_data |= (flex_ratio << FLEX_RATIO_BIT);
+	set_strap_msg_data(soft_reset_data);
+}
+
+void set_flex_ratio_to_tdp_nominal(void)
+{
+	msr_t flex_ratio, msr;
+	u8 nominal_ratio;
+
+	/* Check for Flex Ratio support */
+	flex_ratio = rdmsr(MSR_FLEX_RATIO);
+	if (!(flex_ratio.lo & FLEX_RATIO_EN))
+		return;
+
+	/* Check for >0 configurable TDPs */
+	msr = rdmsr(MSR_PLATFORM_INFO);
+	if (((msr.hi >> 1) & 3) == 0)
+		return;
+
+	/* Use nominal TDP ratio for flex ratio */
+	msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+	nominal_ratio = msr.lo & 0xff;
+
+	/* See if flex ratio is already set to nominal TDP ratio */
+	if (((flex_ratio.lo >> 8) & 0xff) == nominal_ratio)
+		return;
+
+	/* Set flex ratio to nominal TDP ratio */
+	flex_ratio.lo &= ~0xff00;
+	flex_ratio.lo |= nominal_ratio << 8;
+	flex_ratio.lo |= FLEX_RATIO_LOCK;
+	wrmsr(MSR_FLEX_RATIO, flex_ratio);
+
+	/* Set PCH Soft Reset Data Register with new Flex Ratio */
+	set_pch_cpu_strap(nominal_ratio);
+
+	/* Delay before reset to avoid potential TPM lockout */
+	mdelay(30);
+
+	/* Issue soft reset, will be "CPU only" due to soft reset data */
+	soft_reset();
+}
+
+#if !ENV_RAMSTAGE
+void cache_bios_region(void)
+{
+	int mtrr;
+	size_t rom_size;
+	uint32_t alignment;
+
+	mtrr = get_free_var_mtrr();
+
+	if (mtrr == -1)
+		return;
+
+	/* Only the IFD BIOS region is memory mapped (at top of 4G) */
+	fast_spi_get_bios_region(&rom_size);
+
+	if (!rom_size)
+		return;
+
+	/* Round to power of two */
+	alignment = 1 << (log2_ceil(rom_size));
+	rom_size = ALIGN_UP(rom_size, alignment);
+	set_var_mtrr(mtrr, 4ULL*GiB - rom_size, rom_size, MTRR_TYPE_WRPROT);
+}
+#endif
+
+/*
+ * Read PLATFORM_INFO MSR (0xCE) and return the value of
+ * bits 34:33 (CONFIG_TDP_LEVELS).
+ *
+ * Possible values of bits 34:33 are:
+ * 00 : Config TDP not supported
+ * 01 : One additional TDP level supported
+ * 10 : Two additional TDP levels supported
+ * 11 : Reserved
+ */
+int cpu_config_tdp_levels(void)
+{
+	msr_t platform_info;
+
+	/* Bits 34:33 indicate how many levels supported */
+	platform_info = rdmsr(MSR_PLATFORM_INFO);
+	return (platform_info.hi >> 1) & 3;
+}
+
+/*
+ * CONFIG_TDP_NOMINAL MSR (0x648) bits 7:0 give the nominal
+ * TDP level ratio to be used for this processor (in units
+ * of 100MHz).
+ *
+ * Set the PERF_CTL MSR (0x199) P_Req field (bits 14:8) to that value.
+ */
+void set_P_State_to_nominal_TDP_ratio(void)
+{
+	msr_t msr, perf_ctl;
+
+	msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+	perf_ctl.lo = (msr.lo & 0xff) << 8;
+	perf_ctl.hi = 0;
+
+	wrmsr(MSR_IA32_PERF_CTL, perf_ctl);
+	printk(BIOS_DEBUG, "CPU: frequency set to %d MHz\n",
+		((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
+}
+
+/*
+ * PLATFORM_INFO MSR (0xCE) bits 15:8 give
+ * MAX_NON_TURBO_LIM_RATIO.
+ *
+ * Set the PERF_CTL MSR (0x199) P_Req field (bits 14:8) to that value.
+ */
+void set_P_State_to_max_non_turbo_ratio(void)
+{
+	msr_t msr, perf_ctl;
+
+	/* Platform Info bits 15:8 give max ratio */
+	msr = rdmsr(MSR_PLATFORM_INFO);
+	perf_ctl.lo = msr.lo & 0xff00;
+	perf_ctl.hi = 0;
+
+	wrmsr(MSR_IA32_PERF_CTL, perf_ctl);
+	printk(BIOS_DEBUG, "CPU: frequency set to %d MHz\n",
+		((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
+}
diff --git a/src/soc/intel/common/block/cpu/smmrelocate.c b/src/soc/intel/common/block/cpu/smmrelocate.c
new file mode 100644
index 0000000..482107b
--- /dev/null
+++ b/src/soc/intel/common/block/cpu/smmrelocate.c
@@ -0,0 +1,319 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc.
+ * Copyright (C) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <types.h>
+#include <string.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <cpu/cpu.h>
+#include <cpu/x86/cache.h>
+#include <cpu/x86/lapic.h>
+#include <cpu/x86/mp.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
+#include <console/console.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+#include <soc/smm.h>
+#include <soc/systemagent.h>
+#include "chip.h"
+
+/* This gets filled in and used during relocation. */
+static struct smm_relocation_params smm_reloc_params;
+
+static inline void write_smrr(struct smm_relocation_params *relo_params)
+{
+	printk(BIOS_DEBUG, "Writing SMRR. base = 0x%08x, mask=0x%08x\n",
+	       relo_params->smrr_base.lo, relo_params->smrr_mask.lo);
+	wrmsr(SMRR_PHYS_BASE, relo_params->smrr_base);
+	wrmsr(SMRR_PHYS_MASK, relo_params->smrr_mask);
+}
+
+static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
+{
+	printk(BIOS_DEBUG,
+	       "Writing UNCORE_EMRR. base = 0x%08x, mask=0x%08x\n",
+	       relo_params->uncore_emrr_base.lo,
+	       relo_params->uncore_emrr_mask.lo);
+	wrmsr(UNCORE_PRMRR_PHYS_BASE_MSR, relo_params->uncore_emrr_base);
+	wrmsr(UNCORE_PRMRR_PHYS_MASK_MSR, relo_params->uncore_emrr_mask);
+}
+
+static void update_save_state(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase,
+				struct smm_relocation_params *relo_params)
+{
+	u32 smbase;
+	u32 iedbase;
+
+	/*
+	 * The relocated handler runs with all CPUs concurrently. Therefore
+	 * stagger the entry points adjusting SMBASE downwards by save state
+	 * size * CPU num.
+	 */
+	smbase = staggered_smbase;
+	iedbase = relo_params->ied_base;
+
+	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
+	       smbase, iedbase);
+
+	/*
+	 * All threads need to set IEDBASE and SMBASE to the relocated
+	 * handler region. However, the save state location depends on the
+	 * smm_save_state_in_msrs field in the relocation parameters. If
+	 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
+	 * the SMM handler in parallel, and each CPU's save state area is
+	 * located in its respective MSR space. If smm_save_state_in_msrs
+	 * is zero then the SMM relocation is happening serially so the
+	 * save state is at the same default location for all CPUs.
+	 */
+	if (relo_params->smm_save_state_in_msrs) {
+		msr_t smbase_msr;
+		msr_t iedbase_msr;
+
+		smbase_msr.lo = smbase;
+		smbase_msr.hi = 0;
+
+		/*
+		 * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
+		 * not clear why it differs from the SMBASE MSR.
+		 */
+		iedbase_msr.lo = 0;
+		iedbase_msr.hi = iedbase;
+
+		wrmsr(SMBASE_MSR, smbase_msr);
+		wrmsr(IEDBASE_MSR, iedbase_msr);
+	} else {
+		em64t101_smm_state_save_area_t *save_state;
+
+		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
+				      sizeof(*save_state));
+
+		save_state->smbase = smbase;
+		save_state->iedbase = iedbase;
+	}
+}
+
+/* Returns 1 if SMM MSR save state was set. */
+static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
+{
+	msr_t smm_mca_cap;
+
+	smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
+	if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
+		msr_t smm_feature_control;
+
+		smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
+		smm_feature_control.hi = 0;
+		smm_feature_control.lo |= SMM_CPU_SAVE_EN;
+		wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+		relo_params->smm_save_state_in_msrs = 1;
+	}
+	return relo_params->smm_save_state_in_msrs;
+}
+
+/*
+ * The relocation work is actually performed in SMM context, but the code
+ * resides in the ramstage module. This occurs by trampolining from the default
+ * SMRAM entry point to here.
+ */
+void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase)
+{
+	msr_t mtrr_cap;
+	struct smm_relocation_params *relo_params = &smm_reloc_params;
+
+	printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
+
+	/*
+	 * Determine if the processor supports saving state in MSRs. If so,
+	 * enable it before the non-BSPs run so that SMM relocation can occur
+	 * in parallel in the non-BSP CPUs.
+	 */
+	if (cpu == 0) {
+		/*
+		 * If smm_save_state_in_msrs is 1 then that means this is the
+		 * 2nd time through the relocation handler for the BSP.
+		 * Parallel SMM handler relocation is taking place. However,
+		 * it is desired to access other CPUs' save state in the real
+		 * SMM handler. Therefore, disable the SMM save state in MSRs
+		 * feature.
+		 */
+		if (relo_params->smm_save_state_in_msrs) {
+			msr_t smm_feature_control;
+
+			smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
+			smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
+			wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+		} else if (bsp_setup_msr_save_state(relo_params))
+			/*
+			 * Just return from relocation handler if MSR save
+			 * state is enabled. In that case the BSP will come
+			 * back into the relocation handler to set up the new
+			 * SMBASE as well as to disable SMM save state in MSRs.
+			 */
+			return;
+	}
+
+	/* Make appropriate changes to the save state map. */
+	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
+
+	/* Write EMRR and SMRR MSRs based on indicated support. */
+	mtrr_cap = rdmsr(MTRR_CAP_MSR);
+	if (mtrr_cap.lo & SMRR_SUPPORTED)
+		write_smrr(relo_params);
+}
+
+static void fill_in_relocation_params(device_t dev,
+				      struct smm_relocation_params *params)
+{
+	void *handler_base;
+	size_t handler_size;
+	void *ied_base;
+	size_t ied_size;
+	void *tseg_base;
+	size_t tseg_size;
+	u32 emrr_base;
+	u32 emrr_size;
+	int phys_bits;
+	/* All range registers are aligned to 4KiB */
+	const u32 rmask = ~((1 << 12) - 1);
+
+	/*
+	 * Some of the range registers are dependent on the number of physical
+	 * address bits supported.
+	 */
+	phys_bits = cpuid_eax(0x80000008) & 0xff;
+
+	smm_region(&tseg_base, &tseg_size);
+	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);
+	smm_subregion(SMM_SUBREGION_CHIPSET, &ied_base, &ied_size);
+
+	params->smram_size = handler_size;
+	params->smram_base = (uintptr_t)handler_base;
+
+	params->ied_base = (uintptr_t)ied_base;
+	params->ied_size = ied_size;
+
+	/* SMRR has 32-bits of valid address aligned to 4KiB. */
+	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
+	params->smrr_base.hi = 0;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.hi = 0;
+
+	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
+	emrr_base = (params->ied_base + (2 << 20)) & rmask;
+	emrr_size = params->ied_size - (2 << 20);
+
+	/*
+	 * EMRR has 46 bits of valid address aligned to 4KiB. It's dependent
+	 * on the number of physical address bits supported.
+	 */
+	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
+	params->emrr_base.hi = 0;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
+	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
+
+	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
+	params->uncore_emrr_base.lo = emrr_base;
+	params->uncore_emrr_base.hi = 0;
+	params->uncore_emrr_mask.lo = (~(emrr_size - 1) & rmask) |
+					MTRR_PHYS_MASK_VALID;
+	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
+}
+
+static void setup_ied_area(struct smm_relocation_params *params)
+{
+	char *ied_base;
+
+	struct ied_header ied = {
+		.signature = "INTEL RSVD",
+		.size = params->ied_size,
+		.reserved = {0},
+	};
+
+	ied_base = (void *)params->ied_base;
+
+	printk(BIOS_DEBUG, "IED base = 0x%08x\n", params->ied_base);
+	printk(BIOS_DEBUG, "IED size = 0x%08x\n", params->ied_size);
+
+	/* Place IED header at IEDBASE. */
+	memcpy(ied_base, &ied, sizeof(ied));
+
+	/* Zero out 32KiB at IEDBASE + 1MiB */
+	memset(ied_base + (1 << 20), 0, (32 << 10));
+}
+
+void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+				size_t *smm_save_state_size)
+{
+	device_t dev = SA_DEV_ROOT;
+
+	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
+
+	fill_in_relocation_params(dev, &smm_reloc_params);
+
+	if (smm_reloc_params.ied_size)
+		setup_ied_area(&smm_reloc_params);
+
+	*perm_smbase = smm_reloc_params.smram_base;
+	*perm_smsize = smm_reloc_params.smram_size;
+	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
+}
+
+void smm_initialize(void)
+{
+	/* Clear the SMM state in the southbridge. */
+	southbridge_smm_clear_state();
+
+	/*
+	 * Run the relocation handler on the BSP to check and set up
+	 * parallel SMM relocation.
+	 */
+	smm_initiate_relocation();
+
+	if (smm_reloc_params.smm_save_state_in_msrs)
+		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
+}
+
+void smm_relocate(void)
+{
+	/*
+	 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
+	 * shall take place. Run the relocation handler a second time on the
+	 * BSP to do the final move. For APs, a relocation handler always
+	 * needs to be run.
+	 */
+	if (smm_reloc_params.smm_save_state_in_msrs)
+		smm_initiate_relocation_parallel();
+	else if (!boot_cpu())
+		smm_initiate_relocation();
+}
+
+void smm_lock(void)
+{
+	/*
+	 * LOCK the SMM memory window and enable normal SMM.
+	 * After running this function, only a full reset can
+	 * make the SMM registers writable again.
+	 */
+	printk(BIOS_DEBUG, "Locking SMM.\n");
+	pci_write_config8(SA_DEV_ROOT, SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
+}
diff --git a/src/soc/intel/common/block/include/intelblocks/cpu.h b/src/soc/intel/common/block/include/intelblocks/cpu.h
new file mode 100644
index 0000000..85bd53b
--- /dev/null
+++ b/src/soc/intel/common/block/include/intelblocks/cpu.h
@@ -0,0 +1,27 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SOC_INTEL_COMMON_BLOCK_CPU_H
+#define SOC_INTEL_COMMON_BLOCK_CPU_H
+
+void set_flex_ratio_to_tdp_nominal(void);
+#if !ENV_RAMSTAGE
+void cache_bios_region(void);
+#endif
+int cpu_config_tdp_levels(void);
+void set_P_State_to_nominal_TDP_ratio(void);
+void set_P_State_to_max_non_turbo_ratio(void);
+
+#endif	/* SOC_INTEL_COMMON_BLOCK_CPU_H */

-- 
To view, visit https://review.coreboot.org/19540
To unsubscribe, visit https://review.coreboot.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I2f80c42132d9ea738be4051d2395e9e51ac153f8
Gerrit-PatchSet: 4
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Owner: Barnali Sarkar <barnali.sarkar at intel.com>
Gerrit-Reviewer: Aamir Bohra <aamir.bohra at intel.com>
Gerrit-Reviewer: Balaji Manigandan <balaji.manigandan at intel.com>
Gerrit-Reviewer: Bora Guvendik <bora.guvendik at intel.com>
Gerrit-Reviewer: Brandon Breitenstein <brandon.breitenstein at intel.com>
Gerrit-Reviewer: Cole Nelson <colex.nelson at intel.com>
Gerrit-Reviewer: Hannah Williams <hannah.williams at intel.com>
Gerrit-Reviewer: Ravishankar Sarawadi <ravishankar.sarawadi at intel.com>
Gerrit-Reviewer: Rizwan Qureshi <rizwan.qureshi at intel.com>
Gerrit-Reviewer: Shaunak Saha <shaunak.saha at intel.com>
Gerrit-Reviewer: Subrata Banik <subrata.banik at intel.com>
Gerrit-Reviewer: V Sowmya <v.sowmya at intel.com>
Gerrit-Reviewer: dhaval v sharma <dhaval.v.sharma at intel.com>



More information about the coreboot-gerrit mailing list