HAOUAS Elyes has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44412 )
Change subject: src/security: Convert to 96 characters line length
......................................................................
src/security: Convert to 96 characters line length
Change-Id: I1adaae4c9eeee1b8cb1a2d9467712762f7ccc6be
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
---
M src/security/tpm/tis.h
M src/security/tpm/tspi.h
M src/security/tpm/tspi/crtm.c
M src/security/tpm/tspi/crtm.h
M src/security/tpm/tspi/log.c
M src/security/tpm/tspi/tspi.c
M src/security/tpm/tss.h
M src/security/tpm/tss/tcg-1.2/tss.c
M src/security/tpm/tss/tcg-1.2/tss_internal.h
M src/security/tpm/tss/tcg-1.2/tss_structures.h
M src/security/tpm/tss/tcg-2.0/tss.c
M src/security/tpm/tss/tcg-2.0/tss_marshaling.c
M src/security/tpm/tss/tcg-2.0/tss_marshaling.h
M src/security/tpm/tss/tcg-2.0/tss_structures.h
M src/security/tpm/tss/vendor/cr50/cr50.c
15 files changed, 216 insertions(+), 332 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/12/44412/1
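For readers skimming the diffs below, a minimal sketch of the kind of reflow this change applies. The declaration is hypothetical (example_extend is not part of this patch or of the coreboot tree); it only illustrates how a prototype that had to wrap under the old 80-column limit fits on one line at 96 columns, which is the tree-wide convention these conversions target.

#include <stddef.h>
#include <stdint.h>

/* Wrapped for the old 80-column limit: the parameter list needs a continuation line. */
uint32_t example_extend(int pcr, const uint8_t *digest, size_t digest_len,
                        const char *name);

/* Reflowed for the 96-column limit: the same declaration fits on a single line. */
uint32_t example_extend(int pcr, const uint8_t *digest, size_t digest_len, const char *name);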
diff --git a/src/security/tpm/tis.h b/src/security/tpm/tis.h
index 5b2c001..c6b519f 100644
--- a/src/security/tpm/tis.h
+++ b/src/security/tpm/tis.h
@@ -35,16 +35,16 @@
/*
* tis_init()
*
- * Initialize the TPM device. Returns 0 on success or -1 on
- * failure (in case device probing did not succeed).
+ * Initialize the TPM device. Returns 0 on success or -1 on failure (in case device probing did
+ * not succeed).
*/
int tis_init(void);
/*
* tis_open()
*
- * Requests access to locality 0 for the caller. After all commands have been
- * completed the caller is supposed to call tis_close().
+ * Requests access to locality 0 for the caller. After all commands have been completed the
+ * caller is supposed to call tis_close().
*
* Returns 0 on success, -1 on failure.
*/
@@ -53,9 +53,8 @@
/*
* tis_close()
*
- * terminate the current session with the TPM by releasing the locked
- * locality. Returns 0 on success of -1 on failure (in case lock
- * removal did not succeed).
+ * Terminate the current session with the TPM by releasing the locked locality.
+ * Returns 0 on success or -1 on failure (in case lock removal did not succeed).
*/
int tis_close(void);
@@ -72,8 +71,7 @@
* Returns 0 on success (and places the number of response bytes at recv_len)
* or -1 on failure.
*/
-int tis_sendrecv(const u8 *sendbuf, size_t send_size, u8 *recvbuf,
- size_t *recv_len);
+int tis_sendrecv(const u8 *sendbuf, size_t send_size, u8 *recvbuf, size_t *recv_len);
/*
* tis_plat_irq_status()
diff --git a/src/security/tpm/tspi.h b/src/security/tpm/tspi.h
index a776a4d..077eece 100644
--- a/src/security/tpm/tspi.h
+++ b/src/security/tpm/tspi.h
@@ -12,14 +12,13 @@
#define HASH_DATA_CHUNK_SIZE 1024
/**
- * Get the pointer to the single instance of global
- * tcpa log data, and initialize it when necessary
+ * Get the pointer to the single instance of global tcpa log data, and initialize it when
+ * necessary
*/
struct tcpa_table *tcpa_log_init(void);
/**
- * Clears the pre-RAM tcpa log data and initializes
- * any content with default values
+ * Clears the pre-RAM tcpa log data and initializes any content with default values
*/
void tcpa_preram_log_clear(void);
@@ -51,9 +50,8 @@
* @param name sets additional info where the digest comes from
* @return TPM_SUCCESS on success. If not a tpm error is returned
*/
-uint32_t tpm_extend_pcr(int pcr, enum vb2_hash_algorithm digest_algo,
- uint8_t *digest, size_t digest_len,
- const char *name);
+uint32_t tpm_extend_pcr(int pcr, enum vb2_hash_algorithm digest_algo, uint8_t *digest,
+ size_t digest_len, const char *name);
/**
* Issue a TPM_Clear and reenable/reactivate the TPM.
@@ -75,7 +73,6 @@
* @param *rname Name of the region that is measured
* @return TPM error code in case of error otherwise TPM_SUCCESS
*/
-uint32_t tpm_measure_region(const struct region_device *rdev, uint8_t pcr,
- const char *rname);
+uint32_t tpm_measure_region(const struct region_device *rdev, uint8_t pcr, const char *rname);
#endif /* TSPI_H_ */
diff --git a/src/security/tpm/tspi/crtm.c b/src/security/tpm/tspi/crtm.c
index f5c788c..ff91fde 100644
--- a/src/security/tpm/tspi/crtm.c
+++ b/src/security/tpm/tspi/crtm.c
@@ -7,11 +7,10 @@
#include <string.h>
/*
- * This function sets the TCPA log namespace
- * for the cbfs file (region) lookup.
+ * This function sets the TCPA log namespace for the cbfs file (region) lookup.
*/
-static int create_tcpa_metadata(const struct region_device *rdev,
- const char *cbfs_name, char log_string[TCPA_PCR_HASH_NAME])
+static int create_tcpa_metadata(const struct region_device *rdev, const char *cbfs_name,
+ char log_string[TCPA_PCR_HASH_NAME])
{
int i;
struct region_device fmap;
@@ -26,8 +25,7 @@
if (fmap_locate_area_as_rdev(fmap_cbfs_names[i], &fmap) == 0) {
if (region_is_subregion(region_device_region(&fmap),
region_device_region(rdev))) {
- snprintf(log_string, TCPA_PCR_HASH_NAME,
- "FMAP: %s CBFS: %s",
+ snprintf(log_string, TCPA_PCR_HASH_NAME, "FMAP: %s CBFS: %s",
fmap_cbfs_names[i], cbfs_name);
return 0;
}
@@ -63,19 +61,15 @@
struct cbfsf bootblock_data;
struct region_device bootblock_fmap;
if (fmap_locate_area_as_rdev("BOOTBLOCK", &bootblock_fmap) == 0) {
- if (tpm_measure_region(&bootblock_fmap,
- TPM_CRTM_PCR,
- "FMAP: BOOTBLOCK"))
+ if (tpm_measure_region(&bootblock_fmap, TPM_CRTM_PCR, "FMAP: BOOTBLOCK"))
return VB2_ERROR_UNKNOWN;
} else {
- if (cbfs_boot_locate(&bootblock_data,
- prog_name(&bootblock), NULL)) {
+ if (cbfs_boot_locate(&bootblock_data, prog_name(&bootblock), NULL)) {
/*
* measurement is done in
* tspi_measure_cbfs_hook()
*/
- printk(BIOS_INFO,
- "TSPI: Couldn't measure bootblock into CRTM!\n");
+ printk(BIOS_INFO, "TSPI: Couldn't measure bootblock into CRTM!\n");
return VB2_ERROR_UNKNOWN;
}
}
@@ -111,8 +105,7 @@
if (!tcpa_log_available()) {
if (tspi_init_crtm() != VB2_SUCCESS) {
- printk(BIOS_WARNING,
- "Initializing CRTM failed!");
+ printk(BIOS_WARNING, "Initializing CRTM failed!");
return 0;
}
printk(BIOS_DEBUG, "CRTM initialized.");
@@ -126,8 +119,7 @@
pcr_index = TPM_RUNTIME_DATA_PCR;
break;
/*
- * mrc.bin is code executed on CPU, so it
- * should not be considered runtime data
+ * mrc.bin is code executed on CPU, so it should not be considered runtime data
*/
case CBFS_TYPE_MRC:
case CBFS_TYPE_STAGE:
@@ -170,16 +162,12 @@
for (i = 0; i < tclt->num_entries; i++) {
struct tcpa_entry *tce = &tclt->entries[i];
if (tce) {
- printk(BIOS_DEBUG, "TPM: Write digest for"
- " %s into PCR %d\n",
+ printk(BIOS_DEBUG, "TPM: Write digest for %s into PCR %d\n",
tce->name, tce->pcr);
- int result = tlcl_extend(tce->pcr,
- tce->digest,
- NULL);
+ int result = tlcl_extend(tce->pcr, tce->digest, NULL);
if (result != TPM_SUCCESS) {
printk(BIOS_ERR, "TPM: Writing digest"
- " of %s into PCR failed with error"
- " %d\n",
+ " of %s into PCR failed with error %d\n",
tce->name, result);
return VB2_ERROR_UNKNOWN;
}
diff --git a/src/security/tpm/tspi/crtm.h b/src/security/tpm/tspi/crtm.h
index 1b29854..c3d55c9 100644
--- a/src/security/tpm/tspi/crtm.h
+++ b/src/security/tpm/tspi/crtm.h
@@ -11,25 +11,20 @@
/* CRTM */
#define TPM_CRTM_PCR 2
-/* PCR for measuring data which changes during runtime
- * e.g. CMOS, NVRAM...
- */
+/* PCR for measuring data which changes during runtime, e.g. CMOS, NVRAM... */
#define TPM_RUNTIME_DATA_PCR 3
/*
- * Initializes the Core Root of Trust for Measurements
- * in coreboot. The initial code in a chain of trust must measure
- * itself.
+ * Initializes the Core Root of Trust for Measurements in coreboot. The initial code in a chain
+ * of trust must measure itself.
*
* Summary:
* + Measures bootblock in CBFS or BOOTBLOCK FMAP partition.
- * + If vboot starts in romstage, it measures the romstage
- * in CBFS.
- * + Measure the verstage if it is compiled as separate
- * stage.
+ * + If vboot starts in romstage, it measures the romstage in CBFS.
+ * + Measure the verstage if it is compiled as separate stage.
*
* Takes the current vboot context as parameter for s3 checks.
- * returns on success VB2_SUCCESS, else a vboot error.
+ * Returns VB2_SUCCESS on success, else a vboot error.
*/
uint32_t tspi_init_crtm(void);
diff --git a/src/security/tpm/tspi/log.c b/src/security/tpm/tspi/log.c
index 1d6f9ac..9ea6d33 100644
--- a/src/security/tpm/tspi/log.c
+++ b/src/security/tpm/tspi/log.c
@@ -36,11 +36,9 @@
/* We are dealing here with pre CBMEM environment.
* If cbmem isn't available use CAR or SRAM */
- if (!cbmem_possibly_online() &&
- !CONFIG(VBOOT_RETURN_FROM_VERSTAGE))
+ if (!cbmem_possibly_online() && !CONFIG(VBOOT_RETURN_FROM_VERSTAGE))
return (struct tcpa_table *)_tpm_tcpa_log;
- else if (ENV_ROMSTAGE &&
- !CONFIG(VBOOT_RETURN_FROM_VERSTAGE)) {
+ else if (ENV_ROMSTAGE && !CONFIG(VBOOT_RETURN_FROM_VERSTAGE)) {
tclt = tcpa_cbmem_init();
if (!tclt)
return (struct tcpa_table *)_tpm_tcpa_log;
@@ -69,16 +67,14 @@
for (j = 0; j < tce->digest_length; j++)
printk(BIOS_INFO, "%02x", tce->digest[j]);
- printk(BIOS_INFO, " %s [%s]\n",
- tce->digest_type, tce->name);
+ printk(BIOS_INFO, " %s [%s]\n", tce->digest_type, tce->name);
}
}
printk(BIOS_INFO, "\n");
}
void tcpa_log_add_table_entry(const char *name, const uint32_t pcr,
- enum vb2_hash_algorithm digest_algo,
- const uint8_t *digest,
+ enum vb2_hash_algorithm digest_algo, const uint8_t *digest,
const size_t digest_len)
{
struct tcpa_table *tclt = tcpa_log_init();
@@ -106,8 +102,7 @@
return;
}
- strncpy(tce->digest_type,
- vb2_get_hash_algorithm_name(digest_algo),
+ strncpy(tce->digest_type, vb2_get_hash_algorithm_name(digest_algo),
TCPA_PCR_HASH_LEN - 1);
tce->digest_length = digest_len;
memcpy(tce->digest, digest, tce->digest_length);
diff --git a/src/security/tpm/tspi/tspi.c b/src/security/tpm/tspi/tspi.c
index 6ef0138..3cb5556 100644
--- a/src/security/tpm/tspi/tspi.c
+++ b/src/security/tpm/tspi/tspi.c
@@ -35,12 +35,10 @@
}
if (!!deactivated != CONFIG(TPM_DEACTIVATE)) {
- printk(BIOS_INFO,
- "TPM: Unexpected TPM deactivated state. Toggling...\n");
+ printk(BIOS_INFO, "TPM: Unexpected TPM deactivated state. Toggling...\n");
result = tlcl_set_deactivated(!deactivated);
if (result != TPM_SUCCESS) {
- printk(BIOS_ERR,
- "TPM: Can't toggle deactivated state.\n");
+ printk(BIOS_ERR, "TPM: Can't toggle deactivated state.\n");
return result;
}
@@ -63,8 +61,8 @@
case TPM_E_INVALID_POSTINIT:
/*
- * We're on a platform where the TPM maintains power
- * in S3, so it's already initialized.
+ * We're on a platform where the TPM maintains power in S3, so it's already
+ * initialized.
*/
printk(BIOS_INFO, "TPM: Already initialized.\n");
result = TPM_SUCCESS;
@@ -92,11 +90,10 @@
static inline int tspi_tpm_is_setup(void)
{
/*
- * vboot_logic_executed() only starts returning true at the end of
- * verstage, but the vboot logic itself already wants to extend PCRs
- * before that. So in the stage where verification actually runs, we
- * need to check tpm_is_setup. Skip that check in all other stages so
- * this whole function can be evaluated at compile time.
+ * vboot_logic_executed() only starts returning true at the end of verstage, but the
+ * vboot logic itself already wants to extend PCRs before that. So in the stage where
+ * verification actually runs, we need to check tpm_is_setup. Skip that check in all
+ * other stages so this whole function can be evaluated at compile time.
*/
if (CONFIG(VBOOT)) {
if (verification_should_run())
@@ -111,24 +108,21 @@
}
/*
- * tpm_setup starts the TPM and establishes the root of trust for the
- * anti-rollback mechanism. tpm_setup can fail for three reasons. 1 A bug.
- * 2 a TPM hardware failure. 3 An unexpected TPM state due to some attack. In
- * general we cannot easily distinguish the kind of failure, so our strategy is
- * to reboot in recovery mode in all cases. The recovery mode calls tpm_setup
- * again, which executes (almost) the same sequence of operations. There is a
- * good chance that, if recovery mode was entered because of a TPM failure, the
- * failure will repeat itself. (In general this is impossible to guarantee
- * because we have no way of creating the exact TPM initial state at the
- * previous boot.) In recovery mode, we ignore the failure and continue, thus
- * giving the recovery kernel a chance to fix things (that's why we don't set
- * bGlobalLock). The choice is between a knowingly insecure device and a
- * bricked device.
+ * tpm_setup starts the TPM and establishes the root of trust for the anti-rollback mechanism.
+ * tpm_setup can fail for three reasons. 1 A bug.
+ * 2 a TPM hardware failure. 3 An unexpected TPM state due to some attack. In general we cannot
+ * easily distinguish the kind of failure, so our strategy is to reboot in recovery mode in all
+ * cases. The recovery mode calls tpm_setup again, which executes (almost) the same sequence of
+ * operations. There is a good chance that, if recovery mode was entered because of a TPM
+ * failure, the failure will repeat itself. (In general this is impossible to guarantee because
+ * we have no way of creating the exact TPM initial state at the previous boot.) In recovery
+ * mode, we ignore the failure and continue, thus giving the recovery kernel a chance to fix
+ * things (that's why we don't set bGlobalLock). The choice is between a knowingly insecure
+ * device and a bricked device.
*
- * As a side note, observe that we go through considerable hoops to avoid using
- * the STCLEAR permissions for the index spaces. We do this to avoid writing
- * to the TPM flashram at every reboot or wake-up, because of concerns about
- * the durability of the NVRAM.
+ * As a side note, observe that we go through considerable hoops to avoid using the STCLEAR
+ * permissions for the index spaces. We do this to avoid writing to the TPM flashram at every
+ * reboot or wake-up, because of concerns about the durability of the NVRAM.
*/
uint32_t tpm_setup(int s3flag)
{
@@ -147,8 +141,7 @@
}
result = tlcl_startup();
- if (CONFIG(TPM_STARTUP_IGNORE_POSTINIT)
- && result == TPM_E_INVALID_POSTINIT) {
+ if (CONFIG(TPM_STARTUP_IGNORE_POSTINIT) && result == TPM_E_INVALID_POSTINIT) {
printk(BIOS_DEBUG, "TPM: ignoring invalid POSTINIT\n");
result = TPM_SUCCESS;
}
@@ -160,9 +153,8 @@
result = tlcl_assert_physical_presence();
if (result != TPM_SUCCESS) {
/*
- * It is possible that the TPM was delivered with the physical
- * presence command disabled. This tries enabling it, then
- * tries asserting PP again.
+ * It is possible that the TPM was delivered with the physical presence command
+ * disabled. This tries enabling it, then tries asserting PP again.
*/
result = tlcl_physical_presence_cmd_enable();
if (result != TPM_SUCCESS) {
@@ -215,8 +207,8 @@
return TPM_SUCCESS;
}
-uint32_t tpm_extend_pcr(int pcr, enum vb2_hash_algorithm digest_algo,
- uint8_t *digest, size_t digest_len, const char *name)
+uint32_t tpm_extend_pcr(int pcr, enum vb2_hash_algorithm digest_algo, uint8_t *digest,
+ size_t digest_len, const char *name)
{
uint32_t result;
@@ -237,15 +229,13 @@
}
if (CONFIG(TPM_MEASURED_BOOT))
- tcpa_log_add_table_entry(name, pcr, digest_algo,
- digest, digest_len);
+ tcpa_log_add_table_entry(name, pcr, digest_algo, digest, digest_len);
return TPM_SUCCESS;
}
#if CONFIG(VBOOT_LIB)
-uint32_t tpm_measure_region(const struct region_device *rdev, uint8_t pcr,
- const char *rname)
+uint32_t tpm_measure_region(const struct region_device *rdev, uint8_t pcr, const char *rname)
{
uint8_t digest[TPM_PCR_MAX_LEN], digest_len;
uint8_t buf[HASH_DATA_CHUNK_SIZE];
@@ -270,15 +260,14 @@
return TPM_E_HASH_ERROR;
}
/*
- * Though one can mmap the full needed region on x86 this is not the
- * case for e.g. ARM. In order to make this code as universal as
- * possible across different platforms read the data to hash in chunks.
+ * Though one can mmap the full needed region on x86 this is not the case for e.g. ARM.
+ * In order to make this code as universal as possible across different platforms read
+ * the data to hash in chunks.
*/
for (offset = 0; offset < region_device_sz(rdev); offset += len) {
len = MIN(sizeof(buf), region_device_sz(rdev) - offset);
if (rdev_readat(rdev, buf, offset, len) < 0) {
- printk(BIOS_ERR, "TPM: Not able to read region %s.\n",
- rname);
+ printk(BIOS_ERR, "TPM: Not able to read region %s.\n", rname);
return TPM_E_READ_FAILURE;
}
if (vb2_digest_extend(&ctx, buf, len)) {
@@ -295,8 +284,8 @@
printk(BIOS_ERR, "TPM: Extending hash into PCR failed.\n");
return result;
}
- printk(BIOS_DEBUG, "TPM: Digest of %s to PCR %d %s\n",
- rname, pcr, tspi_tpm_is_setup() ? "measured" : "logged");
+ printk(BIOS_DEBUG, "TPM: Digest of %s to PCR %d %s\n", rname, pcr,
+ tspi_tpm_is_setup() ? "measured" : "logged");
return TPM_SUCCESS;
}
#endif /* VBOOT_LIB */
diff --git a/src/security/tpm/tss.h b/src/security/tpm/tss.h
index 4a8206d0..6d39d1b 100644
--- a/src/security/tpm/tss.h
+++ b/src/security/tpm/tss.h
@@ -20,8 +20,8 @@
#include <security/tpm/tss/tcg-1.2/tss_structures.h>
/**
- * Define a space with permission [perm]. [index] is the index for the space,
- * [size] the usable data size. The TPM error code is returned.
+ * Define a space with permission [perm]. [index] is the index for the space, [size] the usable
+ * data size. The TPM error code is returned.
*/
uint32_t tlcl_define_space(uint32_t index, uint32_t perm, uint32_t size);
@@ -36,11 +36,10 @@
uint32_t tlcl_set_deactivated(uint8_t flag);
/**
- * Get flags of interest. Pointers for flags you aren't interested in may
- * be NULL. The TPM error code is returned.
+ * Get flags of interest. Pointers for flags you aren't interested in may be NULL.
+ * The TPM error code is returned.
*/
-uint32_t tlcl_get_flags(uint8_t *disable, uint8_t *deactivated,
- uint8_t *nvlocked);
+uint32_t tlcl_get_flags(uint8_t *disable, uint8_t *deactivated, uint8_t *nvlocked);
/**
* Get the entire set of permanent flags.
@@ -54,18 +53,16 @@
#include <security/tpm/tss/tcg-2.0/tss_structures.h>
/*
- * Define a TPM2 space. The define space command TPM command used by the tlcl
- * layer offers the ability to use custom nv attributes and policies.
+ * Define a TPM2 space. The define space command TPM command used by the tlcl layer offers the
+ * ability to use custom nv attributes and policies.
*/
-uint32_t tlcl_define_space(uint32_t space_index, size_t space_size,
- const TPMA_NV nv_attributes,
+uint32_t tlcl_define_space(uint32_t space_index, size_t space_size, const TPMA_NV nv_attributes,
const uint8_t *nv_policy, size_t nv_policy_size);
/*
* Issue TPM2_GetCapability command
*/
-uint32_t tlcl_get_capability(TPM_CAP capability, uint32_t property,
- uint32_t property_count,
+uint32_t tlcl_get_capability(TPM_CAP capability, uint32_t property, uint32_t property_count,
TPMS_CAPABILITY_DATA *capability_data);
/*
@@ -90,27 +87,23 @@
/**
* Perform a raw TPM request/response transaction.
*/
-uint32_t tlcl_send_receive(const uint8_t *request, uint8_t *response,
- int max_length);
+uint32_t tlcl_send_receive(const uint8_t *request, uint8_t *response, int max_length);
/* Commands */
/**
- * Send a TPM_Startup(ST_CLEAR). The TPM error code is returned (0 for
- * success).
+ * Send a TPM_Startup(ST_CLEAR). The TPM error code is returned (0 for success).
*/
uint32_t tlcl_startup(void);
/**
- * Resume by sending a TPM_Startup(ST_STATE). The TPM error code is returned
- * (0 for success).
+ * Resume by sending a TPM_Startup(ST_STATE). The TPM error code is returned (0 for success).
*/
uint32_t tlcl_resume(void);
/**
- * Save TPM state by sending either TPM_SaveState() (TPM1.2) or
- * TPM_Shutdown(ST_STATE) (TPM2.0). The TPM error code is returned (0 for
- * success).
+ * Save TPM state by sending either TPM_SaveState() (TPM1.2) or TPM_Shutdown(ST_STATE) (TPM2.0).
+ * The TPM error code is returned (0 for success).
*/
uint32_t tlcl_save_state(void);
@@ -128,14 +121,12 @@
uint32_t tlcl_continue_self_test(void);
/**
- * Write [length] bytes of [data] to space at [index]. The TPM error code is
- * returned.
+ * Write [length] bytes of [data] to space at [index]. The TPM error code is returned.
*/
uint32_t tlcl_write(uint32_t index, const void *data, uint32_t length);
/**
- * Read [length] bytes from space at [index] into [data]. The TPM error code
- * is returned.
+ * Read [length] bytes from space at [index] into [data]. The TPM error code is returned.
*/
uint32_t tlcl_read(uint32_t index, void *data, uint32_t length);
@@ -150,8 +141,8 @@
uint32_t tlcl_physical_presence_cmd_enable(void);
/**
- * Finalize the physical presence settings: software PP is enabled, hardware PP
- * is disabled, and the lifetime lock is set. The TPM error code is returned.
+ * Finalize the physical presence settings: software PP is enabled, hardware PP is disabled,
+ * and the lifetime lock is set. The TPM error code is returned.
*/
uint32_t tlcl_finalize_physical_presence(void);
@@ -171,8 +162,7 @@
uint32_t tlcl_clear_control(bool disable);
/**
- * Set the bGlobalLock flag, which only a reboot can clear. The TPM error
- * code is returned.
+ * Set the bGlobalLock flag, which only a reboot can clear. The TPM error code is returned.
*/
uint32_t tlcl_set_global_lock(void);
@@ -184,8 +174,7 @@
/**
* Perform a TPM_Extend.
*/
-uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest,
- uint8_t *out_digest);
+uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest, uint8_t *out_digest);
/**
* Disable platform hierarchy. Specific to TPM2. The TPM error code is returned.
diff --git a/src/security/tpm/tss/tcg-1.2/tss.c b/src/security/tpm/tss/tcg-1.2/tss.c
index 30cd1cc..3bb6530 100644
--- a/src/security/tpm/tss/tcg-1.2/tss.c
+++ b/src/security/tpm/tss/tcg-1.2/tss.c
@@ -3,13 +3,11 @@
/*
* A lightweight TPM command library.
*
- * The general idea is that TPM commands are array of bytes whose
- * fields are mostly compile-time constant. The goal is to build much
- * of the commands at compile time (or build time) and change some of
- * the fields at run time as needed. The code in
- * utility/tlcl_generator.c builds structures containing the commands,
- * as well as the offsets of the fields that need to be set at run
- * time.
+ * The general idea is that TPM commands are array of bytes whose fields are mostly compile-time
+ * constant. The goal is to build much of the commands at compile time (or build time) and
+ * change some of the fields at run time as needed. The code in utility/tlcl_generator.c builds
+ * structures containing the commands, as well as the offsets of the fields that need to be set
+ * at run time.
*/
#include <assert.h>
@@ -24,10 +22,8 @@
#include <console/console.h>
#define VBDEBUG(format, args...) printk(BIOS_DEBUG, format, ## args)
-static int tpm_send_receive(const uint8_t *request,
- uint32_t request_length,
- uint8_t *response,
- uint32_t *response_length)
+static int tpm_send_receive(const uint8_t *request, uint32_t request_length, uint8_t *response,
+ uint32_t *response_length)
{
size_t len = *response_length;
if (tis_sendrecv(request, request_length, response, &len))
@@ -69,11 +65,11 @@
}
/*
- * Like TlclSendReceive below, but do not retry if NEEDS_SELFTEST or
- * DOING_SELFTEST errors are returned.
+ * Like TlclSendReceive below, but do not retry if NEEDS_SELFTEST or DOING_SELFTEST errors are
+ * returned.
*/
-static uint32_t tlcl_send_receive_no_retry(const uint8_t *request,
- uint8_t *response, int max_length)
+static uint32_t tlcl_send_receive_no_retry(const uint8_t *request, uint8_t *response,
+ int max_length)
{
uint32_t response_length = max_length;
uint32_t result;
@@ -89,43 +85,36 @@
/* Otherwise, use the result code from the response */
result = tpm_return_code(response);
- /* TODO: add paranoia about returned response_length vs. max_length
- * (and possibly expected length from the response header). See
- * crosbug.com/17017 */
+ /* TODO: add paranoia about returned response_length vs. max_length (and possibly
+ * expected length from the response header). See crosbug.com/17017 */
- VBDEBUG("TPM: command 0x%x returned 0x%x\n",
- tpm_command_code(request), result);
+ VBDEBUG("TPM: command 0x%x returned 0x%x\n", tpm_command_code(request), result);
return result;
}
-/* Sends a TPM command and gets a response. Returns 0 if success or the TPM
- * error code if error. Waits for the self test to complete if needed. */
-uint32_t tlcl_send_receive(const uint8_t *request, uint8_t *response,
- int max_length)
+/* Sends a TPM command and gets a response. Returns 0 if success or the TPM error code if error.
+ * Waits for the self test to complete if needed. */
+uint32_t tlcl_send_receive(const uint8_t *request, uint8_t *response, int max_length)
{
- uint32_t result = tlcl_send_receive_no_retry(request, response,
- max_length);
- /* If the command fails because the self test has not completed, try it
- * again after attempting to ensure that the self test has completed. */
+ uint32_t result = tlcl_send_receive_no_retry(request, response, max_length);
+ /* If the command fails because the self test has not completed, try it again after
+ * attempting to ensure that the self test has completed. */
if (result == TPM_E_NEEDS_SELFTEST || result == TPM_E_DOING_SELFTEST) {
result = tlcl_continue_self_test();
if (result != TPM_SUCCESS)
return result;
#if defined(TPM_BLOCKING_CONTINUESELFTEST) || defined(VB_RECOVERY_MODE)
/* Retry only once */
- result = tlcl_send_receive_no_retry(request, response,
- max_length);
+ result = tlcl_send_receive_no_retry(request, response, max_length);
#else
/* This needs serious testing. The TPM specification says: "iii.
- * The caller MUST wait for the actions of TPM_ContinueSelfTest
- * to complete before reissuing the command C1." But, if
- * ContinueSelfTest is non-blocking, how do we know that the
- * actions have completed other than trying again? */
+ * The caller MUST wait for the actions of TPM_ContinueSelfTest to complete
+ * before reissuing the command C1." But, if ContinueSelfTest is non-blocking,
+ * how do we know that the actions have completed other than trying again? */
do {
- result = tlcl_send_receive_no_retry(request, response,
- max_length);
+ result = tlcl_send_receive_no_retry(request, response, max_length);
} while (result == TPM_E_DOING_SELFTEST);
#endif
}
@@ -206,8 +195,7 @@
{
struct s_tpm_nv_write_cmd cmd;
uint8_t response[TPM_LARGE_ENOUGH_COMMAND_SIZE];
- const int total_length =
- kTpmRequestHeaderLength + kWriteInfoLength + length;
+ const int total_length = kTpmRequestHeaderLength + kWriteInfoLength + length;
VBDEBUG("TPM: tlcl_write(0x%x, %d)\n", index, length);
memcpy(&cmd, &tpm_nv_write_cmd, sizeof(cmd));
@@ -296,8 +284,7 @@
{
uint8_t response[TPM_LARGE_ENOUGH_COMMAND_SIZE];
uint32_t size;
- uint32_t result = tlcl_send_receive(tpm_getflags_cmd.buffer, response,
- sizeof(response));
+ uint32_t result = tlcl_send_receive(tpm_getflags_cmd.buffer, response, sizeof(response));
if (result != TPM_SUCCESS)
return result;
from_tpm_uint32(response + kTpmResponseHeaderLength, &size);
@@ -308,8 +295,7 @@
return result;
}
-uint32_t tlcl_get_flags(uint8_t *disable, uint8_t *deactivated,
- uint8_t *nvlocked)
+uint32_t tlcl_get_flags(uint8_t *disable, uint8_t *deactivated, uint8_t *nvlocked)
{
TPM_PERMANENT_FLAGS pflags;
uint32_t result = tlcl_get_permanent_flags(&pflags);
@@ -333,8 +319,7 @@
return tlcl_write(TPM_NV_INDEX0, (uint8_t *) &x, 0);
}
-uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest,
- uint8_t *out_digest)
+uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest, uint8_t *out_digest)
{
struct s_tpm_extend_cmd cmd;
uint8_t response[kTpmResponseHeaderLength + kPcrDigestLength];
@@ -349,8 +334,7 @@
return result;
if (out_digest)
- memcpy(out_digest, response + kTpmResponseHeaderLength,
- kPcrDigestLength);
+ memcpy(out_digest, response + kTpmResponseHeaderLength, kPcrDigestLength);
return result;
}
diff --git a/src/security/tpm/tss/tcg-1.2/tss_internal.h b/src/security/tpm/tss/tcg-1.2/tss_internal.h
index 1f49f04..66b000f 100644
--- a/src/security/tpm/tss/tcg-1.2/tss_internal.h
+++ b/src/security/tpm/tss/tcg-1.2/tss_internal.h
@@ -6,8 +6,8 @@
#include <stdint.h>
/*
- * These numbers derive from adding the sizes of command fields as shown in the
- * TPM commands manual.
+ * These numbers derive from adding the sizes of command fields as shown in the TPM commands
+ * manual.
*/
#define kTpmRequestHeaderLength 10
#define kTpmResponseHeaderLength 10
@@ -17,9 +17,8 @@
/*
- * Conversion functions. to_tpm_TYPE puts a value of type TYPE into a TPM
- * command buffer. from_tpm_TYPE gets a value of type TYPE from a TPM command
- * buffer into a variable.
+ * Conversion functions. to_tpm_TYPE puts a value of type TYPE into a TPM command buffer.
+ * from_tpm_TYPE gets a value of type TYPE from a TPM command buffer into a variable.
*/
__attribute__((unused))
static inline void to_tpm_uint32(uint8_t *buffer, uint32_t x)
@@ -36,10 +35,7 @@
__attribute__((unused))
static inline void from_tpm_uint32(const uint8_t *buffer, uint32_t *x)
{
- *x = ((buffer[0] << 24) |
- (buffer[1] << 16) |
- (buffer[2] << 8) |
- buffer[3]);
+ *x = ((buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]);
}
/*
diff --git a/src/security/tpm/tss/tcg-1.2/tss_structures.h b/src/security/tpm/tss/tcg-1.2/tss_structures.h
index 4a976c8..a104665 100644
--- a/src/security/tpm/tss/tcg-1.2/tss_structures.h
+++ b/src/security/tpm/tss/tcg-1.2/tss_structures.h
@@ -1,8 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
- * Some TPM constants and type definitions for standalone compilation for use
- * in the firmware
+ * Some TPM constants and type definitions for standalone compilation for use in the firmware
*/
#ifndef TCG1_TSS_STRUCTURES_H_
#define TCG1_TSS_STRUCTURES_H_
diff --git a/src/security/tpm/tss/tcg-2.0/tss.c b/src/security/tpm/tss/tcg-2.0/tss.c
index 79d8eb9..d138065 100644
--- a/src/security/tpm/tss/tcg-2.0/tss.c
+++ b/src/security/tpm/tss/tcg-2.0/tss.c
@@ -12,8 +12,7 @@
/*
* This file provides interface between firmware and TPM2 device. The TPM1.2
- * API was copied as is and relevant functions modified to comply with the
- * TPM2 specification.
+ * API was copied as is and relevant functions modified to comply with the TPM2 specification.
*/
void *tpm_process_command(TPM_CC command, void *command_body)
@@ -112,18 +111,15 @@
uint32_t tlcl_assert_physical_presence(void)
{
/*
- * Nothing to do on TPM2 for this, use platform hierarchy availability
- * instead.
+ * Nothing to do on TPM2 for this, use platform hierarchy availability instead.
*/
return TPM_SUCCESS;
}
/*
- * The caller will provide the digest in a 32 byte buffer, let's consider it a
- * sha256 digest.
+ * The caller will provide the digest in a 32 byte buffer, let's consider it a sha256 digest.
*/
-uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest,
- uint8_t *out_digest)
+uint32_t tlcl_extend(int pcr_num, const uint8_t *in_digest, uint8_t *out_digest)
{
struct tpm2_pcr_extend_cmd pcr_ext_cmd;
struct tpm2_response *response;
@@ -136,8 +132,8 @@
response = tpm_process_command(TPM2_PCR_Extend, &pcr_ext_cmd);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
if (!response || response->hdr.tpm_code)
return TPM_E_IOERROR;
@@ -156,8 +152,8 @@
struct tpm2_response *response;
response = tpm_process_command(TPM2_Clear, NULL);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
if (!response || response->hdr.tpm_code)
return TPM_E_IOERROR;
@@ -173,8 +169,8 @@
};
response = tpm_process_command(TPM2_ClearControl, &cc);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
if (!response || response->hdr.tpm_code)
return TPM_E_IOERROR;
@@ -227,8 +223,8 @@
if (!response)
return TPM_E_READ_FAILURE;
- printk(BIOS_INFO, "%s:%d index %#x return code %x\n",
- __FILE__, __LINE__, index, response->hdr.tpm_code);
+ printk(BIOS_INFO, "%s:%d index %#x return code %x\n", __FILE__, __LINE__, index,
+ response->hdr.tpm_code);
switch (response->hdr.tpm_code) {
case 0:
break;
@@ -236,8 +232,7 @@
/* Uninitialized, returned if the space hasn't been written. */
case TPM_RC_NV_UNINITIALIZED:
/*
- * Bad index, cr50 specific value, returned if the space
- * hasn't been defined.
+ * Bad index, cr50 specific value, returned if the space hasn't been defined.
*/
case TPM_RC_CR50_NV_UNDEFINED:
return TPM_E_BADINDEX;
@@ -265,8 +260,8 @@
st.yes_no = 1;
response = tpm_process_command(TPM2_SelfTest, &st);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
return TPM_SUCCESS;
}
@@ -280,8 +275,8 @@
response = tpm_process_command(TPM2_NV_WriteLock, &nv_wl);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
if (!response || response->hdr.tpm_code)
return TPM_E_IOERROR;
@@ -307,8 +302,8 @@
response = tpm_process_command(TPM2_NV_Write, &nv_writec);
- printk(BIOS_INFO, "%s: response is %x\n",
- __func__, response ? response->hdr.tpm_code : -1);
+ printk(BIOS_INFO, "%s: response is %x\n", __func__,
+ response ? response->hdr.tpm_code : -1);
/* Need to map tpm error codes into internal values. */
if (!response || response->hdr.tpm_code)
@@ -317,8 +312,7 @@
return TPM_SUCCESS;
}
-uint32_t tlcl_define_space(uint32_t space_index, size_t space_size,
- const TPMA_NV nv_attributes,
+uint32_t tlcl_define_space(uint32_t space_index, size_t space_size, const TPMA_NV nv_attributes,
const uint8_t *nv_policy, size_t nv_policy_size)
{
struct tpm2_nv_define_space_cmd nvds_cmd;
@@ -333,9 +327,8 @@
nvds_cmd.publicInfo.attributes = nv_attributes;
/*
- * Use policy digest based on default pcr0 value. This makes
- * sure that the space can not be deleted as soon as PCR0
- * value has been extended from default.
+ * Use policy digest based on default pcr0 value. This makes sure that the space cannot be
+ * deleted once the PCR0 value has been extended from its default.
*/
if (nv_policy && nv_policy_size) {
nvds_cmd.publicInfo.authPolicy.t.buffer = nv_policy;
@@ -384,8 +377,7 @@
value = SM3_256_DIGEST_SIZE;
break;
default:
- printk(BIOS_SPEW, "%s: unknown hash algorithm %d\n", __func__,
- hash_algo);
+ printk(BIOS_SPEW, "%s: unknown hash algorithm %d\n", __func__, hash_algo);
value = 0;
};
@@ -408,9 +400,8 @@
return TPM_SUCCESS;
}
-uint32_t tlcl_get_capability(TPM_CAP capability, uint32_t property,
- uint32_t property_count,
- TPMS_CAPABILITY_DATA *capability_data)
+uint32_t tlcl_get_capability(TPM_CAP capability, uint32_t property, uint32_t property_count,
+ TPMS_CAPABILITY_DATA *capability_data)
{
struct tpm2_get_capability cmd;
struct tpm2_response *response;
@@ -420,8 +411,8 @@
cmd.propertyCount = property_count;
if (property_count > 1) {
- printk(BIOS_ERR, "%s: property_count more than one not "
- "supported yet\n", __func__);
+ printk(BIOS_ERR, "%s: property_count more than one not supported yet\n",
+ __func__);
return TPM_E_IOERROR;
}
diff --git a/src/security/tpm/tss/tcg-2.0/tss_marshaling.c b/src/security/tpm/tss/tcg-2.0/tss_marshaling.c
index f31c7d0..11541e8 100644
--- a/src/security/tpm/tss/tcg-2.0/tss_marshaling.c
+++ b/src/security/tpm/tss/tcg-2.0/tss_marshaling.c
@@ -28,8 +28,7 @@
return obuf_write_be16(ob, cmd_body->shutdown_type);
}
-static int marshal_get_capability(struct obuf *ob,
- struct tpm2_get_capability *cmd_body)
+static int marshal_get_capability(struct obuf *ob, struct tpm2_get_capability *cmd_body)
{
int rc = 0;
@@ -116,8 +115,7 @@
return rc;
}
-static int marshal_session_header(struct obuf *ob,
- struct tpm2_session_header *session_header)
+static int marshal_session_header(struct obuf *ob, struct tpm2_session_header *session_header)
{
int rc = 0;
struct obuf ob_sz;
@@ -147,11 +145,9 @@
}
/*
- * Common session header can include one or two handles and an empty
- * session_header structure.
+ * Common session header can include one or two handles and an empty session_header structure.
*/
-static int marshal_common_session_header(struct obuf *ob,
- const uint32_t *handles,
+static int marshal_common_session_header(struct obuf *ob, const uint32_t *handles,
size_t handle_count)
{
size_t i;
@@ -170,8 +166,7 @@
return rc;
}
-static int marshal_nv_define_space(struct obuf *ob,
- struct tpm2_nv_define_space_cmd *nvd_in)
+static int marshal_nv_define_space(struct obuf *ob, struct tpm2_nv_define_space_cmd *nvd_in)
{
const uint32_t handle[] = { TPM_RH_PLATFORM };
struct obuf ob_sz;
@@ -197,8 +192,7 @@
return rc;
}
-static int marshal_nv_write(struct obuf *ob,
- struct tpm2_nv_write_cmd *command_body)
+static int marshal_nv_write(struct obuf *ob, struct tpm2_nv_write_cmd *command_body)
{
int rc = 0;
uint32_t handles[] = { TPM_RH_PLATFORM, command_body->nvIndex };
@@ -210,16 +204,14 @@
return rc;
}
-static int marshal_nv_write_lock(struct obuf *ob,
- struct tpm2_nv_write_lock_cmd *command_body)
+static int marshal_nv_write_lock(struct obuf *ob, struct tpm2_nv_write_lock_cmd *command_body)
{
uint32_t handles[] = { TPM_RH_PLATFORM, command_body->nvIndex };
return marshal_common_session_header(ob, handles, ARRAY_SIZE(handles));
}
-static int marshal_pcr_extend(struct obuf *ob,
- struct tpm2_pcr_extend_cmd *command_body)
+static int marshal_pcr_extend(struct obuf *ob, struct tpm2_pcr_extend_cmd *command_body)
{
int rc = 0;
uint32_t handles[] = { command_body->pcrHandle };
@@ -230,8 +222,7 @@
return rc;
}
-static int marshal_nv_read(struct obuf *ob,
- struct tpm2_nv_read_cmd *command_body)
+static int marshal_nv_read(struct obuf *ob, struct tpm2_nv_read_cmd *command_body)
{
int rc = 0;
uint32_t handles[] = { TPM_RH_PLATFORM, command_body->nvIndex };
@@ -251,8 +242,7 @@
return marshal_common_session_header(ob, handle, ARRAY_SIZE(handle));
}
-static int marshal_selftest(struct obuf *ob,
- struct tpm2_self_test *command_body)
+static int marshal_selftest(struct obuf *ob, struct tpm2_self_test *command_body)
{
return obuf_write_be8(ob, command_body->yes_no);
}
@@ -276,8 +266,7 @@
return rc;
}
-static int marshal_clear_control(struct obuf *ob,
- struct tpm2_clear_control_cmd *command_body)
+static int marshal_clear_control(struct obuf *ob, struct tpm2_clear_control_cmd *command_body)
{
int rc = 0;
struct tpm2_session_header session_header;
@@ -301,9 +290,8 @@
switch (*sub_command) {
case TPM2_CR50_SUB_CMD_IMMEDIATE_RESET:
- /* The 16-bit timeout parameter is optional for the
- * IMMEDIATE_RESET command. However in coreboot, the timeout
- * parameter must be specified.
+ /* The 16-bit timeout parameter is optional for the IMMEDIATE_RESET command.
+ * However in coreboot, the timeout parameter must be specified.
*/
rc |= obuf_write_be16(ob, sub_command[0]);
rc |= obuf_write_be16(ob, sub_command[1]);
@@ -320,13 +308,12 @@
break;
case TPM2_CR50_SUB_CMD_TPM_MODE:
/* The Cr50 TPM_MODE command supports an optional parameter.
- * When the parameter is present the Cr50 will attempt to change
- * the TPM state (enable or disable) and returns the new state
- * in the response. When the parameter is absent, the Cr50
- * returns the current TPM state.
+ * When the parameter is present the Cr50 will attempt to change the TPM state
+ * (enable or disable) and returns the new state in the response.
+ * When the parameter is absent, the Cr50 returns the current TPM state.
*
- * coreboot currently only uses the TPM get capability and does
- * not set a new TPM state with the Cr50.
+ * coreboot currently only uses the TPM get capability and does not set a new
+ * TPM state with the Cr50.
*/
rc |= obuf_write_be16(ob, *sub_command);
break;
@@ -335,8 +322,7 @@
break;
default:
/* Unsupported subcommand. */
- printk(BIOS_WARNING, "Unsupported cr50 subcommand: 0x%04x\n",
- *sub_command);
+ printk(BIOS_WARNING, "Unsupported cr50 subcommand: 0x%04x\n", *sub_command);
rc = -1;
break;
}
@@ -431,8 +417,7 @@
return rc;
}
-static int unmarshal_get_capability(struct ibuf *ib,
- struct get_cap_response *gcr)
+static int unmarshal_get_capability(struct ibuf *ib, struct get_cap_response *gcr)
{
int i;
int rc = 0;
@@ -473,14 +458,12 @@
return -1;
}
for (i = 0; i < gcr->cd.data.assignedPCR.count; i++) {
- TPMS_PCR_SELECTION *pp =
- &gcr->cd.data.assignedPCR.pcrSelections[i];
+ TPMS_PCR_SELECTION *pp = &gcr->cd.data.assignedPCR.pcrSelections[i];
rc |= ibuf_read(ib, pp, sizeof(TPMS_PCR_SELECTION));
}
break;
default:
- printk(BIOS_ERR,
- "%s:%d - unable to unmarshal capability response",
+ printk(BIOS_ERR, "%s:%d - unable to unmarshal capability response",
__func__, __LINE__);
printk(BIOS_ERR, " for %d\n", gcr->cd.capability);
rc = -1;
@@ -490,8 +473,7 @@
return rc;
}
-static int unmarshal_TPM2B_MAX_NV_BUFFER(struct ibuf *ib,
- TPM2B_MAX_NV_BUFFER *nv_buffer)
+static int unmarshal_TPM2B_MAX_NV_BUFFER(struct ibuf *ib, TPM2B_MAX_NV_BUFFER *nv_buffer)
{
if (ibuf_read_be16(ib, &nv_buffer->t.size))
return -1;
@@ -499,10 +481,8 @@
nv_buffer->t.buffer = ibuf_oob_drain(ib, nv_buffer->t.size);
if (nv_buffer->t.buffer == NULL) {
- printk(BIOS_ERR, "%s:%d - "
- "size mismatch: expected %d, remaining %zd\n",
- __func__, __LINE__, nv_buffer->t.size,
- ibuf_remaining(ib));
+ printk(BIOS_ERR, "%s:%d - size mismatch: expected %d, remaining %zd\n",
+ __func__, __LINE__, nv_buffer->t.size, ibuf_remaining(ib));
return -1;
}
@@ -520,20 +500,17 @@
if (nvr->params_size !=
(nvr->buffer.t.size + sizeof(nvr->buffer.t.size))) {
- printk(BIOS_ERR,
- "%s:%d - parameter/buffer %d/%d size mismatch",
- __func__, __LINE__, nvr->params_size,
- nvr->buffer.t.size);
+ printk(BIOS_ERR, "%s:%d - parameter/buffer %d/%d size mismatch",
+ __func__, __LINE__, nvr->params_size, nvr->buffer.t.size);
return -1;
}
/*
- * Let's ignore the authorization section. It should be 5 bytes total,
- * just confirm that this is the case and report any discrepancy.
+ * Let's ignore the authorization section. It should be 5 bytes total, just confirm that
+ * this is the case and report any discrepancy.
*/
if (ibuf_remaining(ib) != 5)
- printk(BIOS_ERR,
- "%s:%d - unexpected authorization section size %zd\n",
+ printk(BIOS_ERR, "%s:%d - unexpected authorization section size %zd\n",
__func__, __LINE__, ibuf_remaining(ib));
ibuf_oob_drain(ib, ibuf_remaining(ib));
@@ -541,8 +518,7 @@
return 0;
}
-static int unmarshal_vendor_command(struct ibuf *ib,
- struct vendor_command_response *vcr)
+static int unmarshal_vendor_command(struct ibuf *ib, struct vendor_command_response *vcr)
{
if (ibuf_read_be16(ib, &vcr->vc_subcommand))
return -1;
@@ -561,9 +537,8 @@
case TPM2_CR50_SUB_CMD_GET_BOOT_MODE:
return ibuf_read_be8(ib, &vcr->boot_mode);
default:
- printk(BIOS_ERR,
- "%s:%d - unsupported vendor command %#04x!\n",
- __func__, __LINE__, vcr->vc_subcommand);
+ printk(BIOS_ERR, "%s:%d - unsupported vendor command %#04x!\n", __func__,
+ __LINE__, vcr->vc_subcommand);
return -1;
}
@@ -583,15 +558,14 @@
return NULL;
if (ibuf_capacity(ib) != tpm2_static_resp.hdr.tpm_size) {
- printk(BIOS_ERR,
- "%s: size mismatch in response to command %#x\n",
- __func__, command);
+ printk(BIOS_ERR, "%s: size mismatch in response to command %#x\n", __func__,
+ command);
return NULL;
}
- /* On errors, we're not sure what the TPM is returning. None of the
- commands we use actually expect useful data payloads for errors, so
- just ignore any data after the header. */
+ /* On errors, we're not sure what the TPM is returning. None of the commands we use
+ * actually expect useful data payloads for errors, so just ignore any data after the
+ * header. */
if (tpm2_static_resp.hdr.tpm_code != TPM2_RC_SUCCESS)
return &tpm2_static_resp;
@@ -630,10 +604,8 @@
size_t sz_left;
const uint8_t *data;
- printk(BIOS_INFO, "%s:%d:"
- "Request to unmarshal unexpected command %#x,"
- " code %#x",
- __func__, __LINE__, command,
+ printk(BIOS_INFO, "%s:%d:Request to unmarshal unexpected command %#x,"
+ " code %#x", __func__, __LINE__, command,
tpm2_static_resp.hdr.tpm_code);
sz_left = ibuf_remaining(ib);
@@ -651,15 +623,13 @@
if (ibuf_remaining(ib)) {
printk(BIOS_INFO,
- "%s:%d got %d bytes back in response to %#x,"
- " failed to parse (%zd)\n",
- __func__, __LINE__, tpm2_static_resp.hdr.tpm_size,
- command, ibuf_remaining(ib));
+ "%s:%d got %d bytes back in response to %#x, failed to parse (%zd)\n",
+ __func__, __LINE__, tpm2_static_resp.hdr.tpm_size, command,
+ ibuf_remaining(ib));
return NULL;
}
if (rc)
- printk(BIOS_WARNING, "Warning: %s had one or more failures.\n",
- __func__);
+ printk(BIOS_WARNING, "Warning: %s had one or more failures.\n", __func__);
/* The entire message have been parsed. */
return &tpm2_static_resp;
diff --git a/src/security/tpm/tss/tcg-2.0/tss_marshaling.h b/src/security/tpm/tss/tcg-2.0/tss_marshaling.h
index 432cf5a3..76de5de 100644
--- a/src/security/tpm/tss/tcg-2.0/tss_marshaling.h
+++ b/src/security/tpm/tss/tcg-2.0/tss_marshaling.h
@@ -11,8 +11,7 @@
/**
* tpm_marshal_command
*
- * Given a structure containing a TPM2 command, serialize the structure for
- * sending it to the TPM.
+ * Given a structure containing a TPM2 command, serialize the structure for sending it to the TPM.
*
* @command: code of the TPM2 command to marshal
* @tpm_command_body: a pointer to the command specific structure
@@ -27,16 +26,15 @@
/**
* tpm_unmarshal_response
*
- * Given a buffer received from the TPM in response to a certain command,
- * deserialize the buffer into the expeced response structure.
+ * Given a buffer received from the TPM in response to a certain command, deserialize the buffer
+ * into the expected response structure.
*
* struct tpm2_response is a union of all possible responses.
*
* @command: code of the TPM2 command for which a response is unmarshaled
* @ib: input buffer containing the serialized response.
*
- * Returns a pointer to the deserialized response or NULL in case of
- * unmarshaling problems.
+ * Returns a pointer to the deserialized response or NULL in case of unmarshaling problems.
*/
struct tpm2_response *tpm_unmarshal_response(TPM_CC command, struct ibuf *ib);
diff --git a/src/security/tpm/tss/tcg-2.0/tss_structures.h b/src/security/tpm/tss/tcg-2.0/tss_structures.h
index 1c7aa4b..0fa4d1a 100644
--- a/src/security/tpm/tss/tcg-2.0/tss_structures.h
+++ b/src/security/tpm/tss/tcg-2.0/tss_structures.h
@@ -4,8 +4,8 @@
#define TCG2_TSS_STRUCTURES_H_
/*
- * This file includes a subset of definitions of TPM protocol version 2.x
- * constants and structures needed for functions used in coreboot.
+ * This file includes a subset of definitions of TPM protocol version 2.x constants and
+ * structures needed for functions used in coreboot.
*/
#include <types.h>
#include "../common/tss_common.h"
@@ -89,8 +89,8 @@
#define TPM2_NV_Read ((TPM_CC)0x0000014E)
#define TPM2_GetCapability ((TPM_CC)0x0000017A)
#define TPM2_PCR_Extend ((TPM_CC)0x00000182)
-/* TPM2 specifies vendor commands need to have this bit set. Vendor command
- space is defined by the lower 16 bits. */
+/* TPM2 specifies vendor commands need to have this bit set. Vendor command space is defined by
+ * the lower 16 bits. */
#define TPM_CC_VENDOR_BIT_MASK 0x20000000
/* Table 15 - TPM_RC Constants (Actions) */
@@ -146,10 +146,9 @@
#define TPM_RC_NV_UNINITIALIZED ((TPM_RC)(RC_VER1 + 0x04A))
/*
- * Cr50 returns this code when an attempt is made to read an NV location which
- * has not yet been defined. This is an aggregation of various return code
- * extensions which may or may not match if a different TPM2 device is
- * used.
+ * Cr50 returns this code when an attempt is made to read an NV location which has not yet been
+ * defined. This is an aggregation of various return code extensions which may or may not match
+ * if a different TPM2 device is used.
*/
#define TPM_RC_CR50_NV_UNDEFINED 0x28b
@@ -359,8 +358,8 @@
};
/*
- * TPM session header for commands requiring session information. Also
- * included in the responses to those commands.
+ * TPM session header for commands requiring session information. Also included in the responses
+ * to those commands.
*/
struct tpm2_session_header {
uint32_t session_handle;
diff --git a/src/security/tpm/tss/vendor/cr50/cr50.c b/src/security/tpm/tss/vendor/cr50/cr50.c
index 3be1e5a..b6dff81 100644
--- a/src/security/tpm/tss/vendor/cr50/cr50.c
+++ b/src/security/tpm/tss/vendor/cr50/cr50.c
@@ -19,8 +19,7 @@
if (response == NULL || (response && response->hdr.tpm_code)) {
if (response)
- printk(BIOS_INFO, "%s: failed %x\n", __func__,
- response->hdr.tpm_code);
+ printk(BIOS_INFO, "%s: failed %x\n", __func__, response->hdr.tpm_code);
else
printk(BIOS_INFO, "%s: failed\n", __func__);
return TPM_E_IOERROR;
@@ -28,8 +27,7 @@
return TPM_SUCCESS;
}
-uint32_t tlcl_cr50_enable_update(uint16_t timeout_ms,
- uint8_t *num_restored_headers)
+uint32_t tlcl_cr50_enable_update(uint16_t timeout_ms, uint8_t *num_restored_headers)
{
struct tpm2_response *response;
uint16_t command_body[] = {
@@ -78,9 +76,8 @@
if (response->hdr.tpm_code == VENDOR_RC_INTERNAL_ERROR) {
/*
- * The Cr50 returns VENDOR_RC_INTERNAL_ERROR iff the key ladder
- * is disabled. The Cr50 requires a reboot to re-enable the key
- * ladder.
+ * The Cr50 returns VENDOR_RC_INTERNAL_ERROR iff the key ladder is disabled.
+ * The Cr50 requires a reboot to re-enable the key ladder.
*/
return TPM_E_MUST_REBOOT;
}
@@ -140,8 +137,7 @@
* Issue an immediate reset to the Cr50.
*/
printk(BIOS_INFO, "Issuing cr50 reset\n");
- response = tpm_process_command(TPM2_CR50_VENDOR_COMMAND,
- &reset_command_body);
+ response = tpm_process_command(TPM2_CR50_VENDOR_COMMAND, &reset_command_body);
if (!response)
return TPM_E_IOERROR;
--
To view, visit https://review.coreboot.org/c/coreboot/+/44412
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I1adaae4c9eeee1b8cb1a2d9467712762f7ccc6be
Gerrit-Change-Number: 44412
Gerrit-PatchSet: 1
Gerrit-Owner: HAOUAS Elyes <ehaouas@noos.fr>
Gerrit-MessageType: newchange
HAOUAS Elyes has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44310 )
Change subject: src/cpu/x86/smm: Convert to 96 characters line length
......................................................................
src/cpu/x86/smm: Convert to 96 characters line length
Change-Id: I0ef58342a183071b59d8c74a6220acd4a1ffb019
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
---
M src/cpu/x86/smm/smihandler.c
M src/cpu/x86/smm/smm_module_handler.c
M src/cpu/x86/smm/smm_module_loader.c
3 files changed, 87 insertions(+), 119 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/10/44310/1
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c
index 8fd95bb..b70f757 100644
--- a/src/cpu/x86/smm/smihandler.c
+++ b/src/cpu/x86/smm/smihandler.c
@@ -36,8 +36,7 @@
typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
/* SMI multiprocessing semaphore */
-static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status
- = SMI_UNLOCKED;
+static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status = SMI_UNLOCKED;
static int smi_obtain_lock(void)
{
@@ -74,9 +73,7 @@
void io_trap_handler(int smif)
{
- /* If a handler function handled a given IO trap, it
- * shall return a non-zero value
- */
+ /* If a handler function handled a given IO trap, it shall return a non-zero value */
printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
if (southbridge_io_trap_handler(smif))
@@ -141,13 +138,12 @@
/* Are we ok to execute the handler? */
if (!smi_obtain_lock()) {
- /* For security reasons we don't release the other CPUs
- * until the CPU with the lock is actually done
+ /* For security reasons we don't release the other CPUs until the CPU with the
+ * lock is actually done
*/
while (smi_handler_status == SMI_LOCKED) {
asm volatile (
- ".byte 0xf3, 0x90\n" /* hint a CPU we are in
- * spinlock (PAUSE
+ ".byte 0xf3, 0x90\n" /* hint a CPU we are in spinlock (PAUSE
* instruction, REP NOP)
*/
);
@@ -167,34 +163,30 @@
case 0x00030002:
case 0x00030007:
state_save.type = LEGACY;
- state_save.legacy_state_save =
- smm_save_state(smm_base,
- SMM_LEGACY_ARCH_OFFSET, node);
+ state_save.legacy_state_save = smm_save_state(smm_base, SMM_LEGACY_ARCH_OFFSET,
+ node);
break;
case 0x00030100:
state_save.type = EM64T100;
- state_save.em64t100_state_save =
- smm_save_state(smm_base,
- SMM_EM64T100_ARCH_OFFSET, node);
+ state_save.em64t100_state_save = smm_save_state(smm_base,
+ SMM_EM64T100_ARCH_OFFSET, node);
break;
case 0x00030101: /* SandyBridge, IvyBridge, and Haswell */
state_save.type = EM64T101;
- state_save.em64t101_state_save =
- smm_save_state(smm_base,
- SMM_EM64T101_ARCH_OFFSET, node);
+ state_save.em64t101_state_save = smm_save_state(smm_base,
+ SMM_EM64T101_ARCH_OFFSET, node);
break;
case 0x00020064:
case 0x00030064:
state_save.type = AMD64;
- state_save.amd64_state_save =
- smm_save_state(smm_base,
- SMM_AMD64_ARCH_OFFSET, node);
+ state_save.amd64_state_save = smm_save_state(smm_base, SMM_AMD64_ARCH_OFFSET,
+ node);
break;
default:
printk(BIOS_DEBUG, "smm_revision: 0x%08x\n", smm_revision);
printk(BIOS_DEBUG, "SMI# not supported on your CPU\n");
- /* Don't release lock, so no further SMI will happen,
- * if we don't handle it anyways.
+ /* Don't release lock, so no further SMI will happen if we don't handle it
+ * anyway.
*/
return;
}
@@ -218,10 +210,9 @@
smi_set_eos();
}
-/* Provide a default implementation for all weak handlers so that relocation
- * entries in the modules make sense. Without default implementations the
- * weak relocations w/o a symbol have a 0 address which is where the modules
- * are linked at. */
+/* Provide a default implementation for all weak handlers so that relocation entries in the
+ * modules make sense. Without default implementations the weak relocations w/o a symbol have
+ * a 0 address which is where the modules are linked at. */
int __weak mainboard_io_trap_handler(int smif) { return 0; }
void __weak southbridge_smi_handler(void) {}
void __weak mainboard_smi_gpi(u32 gpi_sts) {}
diff --git a/src/cpu/x86/smm/smm_module_handler.c b/src/cpu/x86/smm/smm_module_handler.c
index 02682b4..9d88245 100644
--- a/src/cpu/x86/smm/smm_module_handler.c
+++ b/src/cpu/x86/smm/smm_module_handler.c
@@ -47,9 +47,7 @@
void io_trap_handler(int smif)
{
- /* If a handler function handled a given IO trap, it
- * shall return a non-zero value
- */
+ /* If a handler function handled a given IO trap, it shall return a non-zero value */
printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
if (southbridge_io_trap_handler(smif))
@@ -97,8 +95,8 @@
{
char *base;
- /* This function assumes all save states start at top of default
- * SMRAM size space and are staggered down by save state size. */
+ /* This function assumes all save states start at top of default SMRAM size space and
+ * are staggered down by save state size. */
base = (void *)smm_runtime->smbase;
base += SMM_DEFAULT_SIZE;
base -= (cpu + 1) * smm_runtime->save_state_size;
@@ -127,22 +125,21 @@
cpu = p->cpu;
expected_canary = (uintptr_t)p->canary;
- /* Make sure to set the global runtime. It's OK to race as the value
- * will be the same across CPUs as well as multiple SMIs. */
+ /* Make sure to set the global runtime. It's OK to race as the value will be the same
+ * across CPUs as well as multiple SMIs. */
if (smm_runtime == NULL)
smm_runtime = runtime;
if (cpu >= CONFIG_MAX_CPUS) {
console_init();
- printk(BIOS_CRIT,
- "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+ printk(BIOS_CRIT, "Invalid CPU number assigned in SMM stub: %d\n", cpu);
return;
}
/* Are we ok to execute the handler? */
if (!smi_obtain_lock()) {
- /* For security reasons we don't release the other CPUs
- * until the CPU with the lock is actually done */
+ /* For security reasons we don't release the other CPUs until the CPU with the
+ * lock is actually done */
while (smi_handler_status == SMI_LOCKED) {
asm volatile (
".byte 0xf3, 0x90\n" /* PAUSE */
@@ -174,8 +171,7 @@
actual_canary = *p->canary;
if (actual_canary != expected_canary) {
- printk(BIOS_DEBUG, "canary 0x%lx != 0x%lx\n", actual_canary,
- expected_canary);
+ printk(BIOS_DEBUG, "canary 0x%lx != 0x%lx\n", actual_canary, expected_canary);
// Don't die if we can't indicate an error.
if (CONFIG(DEBUG_SMI))
@@ -190,10 +186,9 @@
RMODULE_ENTRY(smm_handler_start);
-/* Provide a default implementation for all weak handlers so that relocation
- * entries in the modules make sense. Without default implementations the
- * weak relocations w/o a symbol have a 0 address which is where the modules
- * are linked at. */
+/* Provide a default implementation for all weak handlers so that relocation entries in the
+ * modules make sense. Without default implementations the weak relocations w/o a symbol have
+ * a 0 address which is where the modules are linked at. */
int __weak mainboard_io_trap_handler(int smif) { return 0; }
void __weak cpu_smi_handler(void) {}
void __weak northbridge_smi_handler() {}
diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c
index fc1e1b3..15c870b 100644
--- a/src/cpu/x86/smm/smm_module_loader.c
+++ b/src/cpu/x86/smm/smm_module_loader.c
@@ -10,9 +10,8 @@
#define FXSAVE_SIZE 512
-/* FXSAVE area during relocation. While it may not be strictly needed the
- SMM stub code relies on the FXSAVE area being non-zero to enable SSE
- instructions within SMM mode. */
+/* FXSAVE area during relocation. While it may not be strictly needed the SMM stub code relies
+ * on the FXSAVE area being non-zero to enable SSE instructions within SMM mode. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));
@@ -26,8 +25,8 @@
* The components are assumed to consist of one consecutive region.
*/
-/* These parameters are used by the SMM stub code. A pointer to the params
- * is also passed to the C-base handler. */
+/* These parameters are used by the SMM stub code. A pointer to the params is also passed to the
+ * C-base handler. */
struct smm_stub_params {
u32 stack_size;
u32 stack_top;
@@ -39,8 +38,8 @@
} __packed;
/*
- * The stub is the entry point that sets up protected mode and stacks for each
- * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
+ * The stub is the entry point that sets up protected mode and stacks for each CPU. It then
+ * calls into the SMM handler module. It is encoded as an rmodule.
*/
extern unsigned char _binary_smmstub_start[];
@@ -48,8 +47,8 @@
#define SMM_MINIMUM_STACK_SIZE 32
/*
- * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
- * addresses across CPUs.
+ * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry addresses
+ * across CPUs.
*
* 0xe9 <16-bit relative target> ; jmp <relative-offset>
*/
@@ -59,9 +58,9 @@
} __packed;
/*
- * Place the entry instructions for num entries beginning at entry_start with
- * a given stride. The entry_start is the highest entry point's address. All
- * other entry points are stride size below the previous.
+ * Place the entry instructions for num entries beginning at entry_start with a given stride.
+ * The entry_start is the highest entry point's address. All other entry points are stride size
+ * below the previous.
*/
static void smm_place_jmp_instructions(void *entry_start, size_t stride,
size_t num, void *jmp_target)
@@ -70,20 +69,18 @@
char *cur;
struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
- /* Each entry point has an IP value of 0x8000. The SMBASE for each
- * CPU is different so the effective address of the entry instruction
- * is different. Therefore, the relative displacement for each entry
- * instruction needs to be updated to reflect the current effective
- * IP. Additionally, the IP result from the jmp instruction is
- * calculated using the next instruction's address so the size of
- * the jmp instruction needs to be taken into account. */
+ /* Each entry point has an IP value of 0x8000. The SMBASE for each CPU is different so
+ * the effective address of the entry instruction is different. Therefore, the relative
+ * displacement for each entry instruction needs to be updated to reflect the current
+ * effective IP. Additionally, the IP result from the jmp instruction is calculated
+ * using the next instruction's address so the size of the jmp instruction needs to be
+ * taken into account. */
cur = entry_start;
for (i = 0; i < num; i++) {
uint32_t disp = (uintptr_t)jmp_target;
disp -= sizeof(entry) + (uintptr_t)cur;
- printk(BIOS_DEBUG,
- "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
+ printk(BIOS_DEBUG, "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
cur, disp);
entry.rel16 = disp;
memcpy(cur, &entry, sizeof(entry));
@@ -91,10 +88,9 @@
}
}
-/* Place stacks in base -> base + size region, but ensure the stacks don't
- * overlap the staggered entry points. */
-static void *smm_stub_place_stacks(char *base, size_t size,
- struct smm_loader_params *params)
+/* Place stacks in base -> base + size region, but ensure the stacks don't overlap the staggered
+ * entry points. */
+static void *smm_stub_place_stacks(char *base, size_t size, struct smm_loader_params *params)
{
size_t total_stack_size;
char *stacks_top;
@@ -102,10 +98,8 @@
if (params->stack_top != NULL)
return params->stack_top;
- /* If stack space is requested assume the space lives in the lower
- * half of SMRAM. */
- total_stack_size = params->per_cpu_stack_size *
- params->num_concurrent_stacks;
+ /* If stack space is requested assume the space lives in the lower half of SMRAM. */
+ total_stack_size = params->per_cpu_stack_size * params->num_concurrent_stacks;
/* There has to be at least one stack user. */
if (params->num_concurrent_stacks < 1)
@@ -121,9 +115,8 @@
return stacks_top;
}
-/* Place the staggered entry points for each CPU. The entry points are
- * staggered by the per CPU SMM save state size extending down from
- * SMM_ENTRY_OFFSET. */
+/* Place the staggered entry points for each CPU. The entry points are staggered by the per
+ * CPU SMM save state size extending down from SMM_ENTRY_OFFSET. */
static void smm_stub_place_staggered_entry_points(char *base,
const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
@@ -131,40 +124,36 @@
stub_entry_offset = rmodule_entry_offset(smm_stub);
- /* If there are staggered entry points or the stub is not located
- * at the SMM entry point then jmp instructions need to be placed. */
+ /* If there are staggered entry points or the stub is not located at the SMM entry point
+ * then jmp instructions need to be placed. */
if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
size_t num_entries;
base += SMM_ENTRY_OFFSET;
num_entries = params->num_concurrent_save_states;
- /* Adjust beginning entry and number of entries down since
- * the initial entry point doesn't need a jump sequence. */
+ /* Adjust beginning entry and number of entries down since the initial entry
+ * point doesn't need a jump sequence. */
if (stub_entry_offset == 0) {
base -= params->per_cpu_save_state_size;
num_entries--;
}
- smm_place_jmp_instructions(base,
- params->per_cpu_save_state_size,
- num_entries,
+ smm_place_jmp_instructions(base, params->per_cpu_save_state_size, num_entries,
rmodule_entry(smm_stub));
}
}
/*
- * The stub setup code assumes it is completely contained within the
- * default SMRAM size (0x10000). There are potentially 3 regions to place
- * within the default SMRAM size:
+ * The stub setup code assumes it is completely contained within the default
+ * SMRAM size (0x10000). There are potentially 3 regions to place within the default SMRAM size:
* 1. Save state areas
* 2. Stub code
* 3. Stack areas
*
- * The save state and stack areas are treated as contiguous for the number of
- * concurrent areas requested. The save state always lives at the top of SMRAM
- * space, and the entry point is at offset 0x8000.
+ * The save state and stack areas are treated as contiguous for the number of concurrent areas
+ * requested. The save state always lives at the top of SMRAM space, and the entry point is at
+ * offset 0x8000.
*/
-static int smm_module_setup_stub(void *smbase, size_t smm_size,
- struct smm_loader_params *params,
+static int smm_module_setup_stub(void *smbase, size_t smm_size, struct smm_loader_params *params,
void *fxsave_area)
{
size_t total_save_state_size;
@@ -209,8 +198,8 @@
smm_stub_size = rmodule_memory_size(&smm_stub);
stub_entry_offset = rmodule_entry_offset(&smm_stub);
- /* Assume the stub is always small enough to live within upper half of
- * SMRAM region after the save state space has been allocated. */
+ /* Assume the stub is always small enough to live within upper half of SMRAM region
+ * after the save state space has been allocated. */
smm_stub_loc = &base[SMM_ENTRY_OFFSET];
/* Adjust for jmp instruction sequence. */
@@ -229,10 +218,9 @@
/* The stacks, if requested, live in the lower half of SMRAM space. */
size = SMM_ENTRY_OFFSET;
- /* Ensure stacks don't encroach onto staggered SMM
- * entry points. The staggered entry points extend
- * below SMM_ENTRY_OFFSET by the number of concurrent
- * save states - 1 and save state size. */
+ /* Ensure stacks don't encroach onto staggered SMM entry points. The staggered entry
+ * points extend below SMM_ENTRY_OFFSET by the number of concurrent save states - 1 and
+ * save state size. */
if (params->num_concurrent_save_states > 1) {
size -= total_save_state_size;
size += params->per_cpu_save_state_size;
@@ -278,17 +266,16 @@
}
/*
- * smm_setup_relocation_handler assumes the callback is already loaded in
- * memory. i.e. Another SMM module isn't chained to the stub. The other
- * assumption is that the stub will be entered from the default SMRAM
- * location: 0x30000 -> 0x40000.
+ * smm_setup_relocation_handler assumes the callback is already loaded in memory. i.e. Another
+ * SMM module isn't chained to the stub. The other assumption is that the stub will be entered
+ * from the default SMRAM location: 0x30000 -> 0x40000.
*/
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
void *smram = (void *)SMM_DEFAULT_BASE;
- /* There can't be more than 1 concurrent save state for the relocation
- * handler because all CPUs default to 0x30000 as SMBASE. */
+ /* There can't be more than 1 concurrent save state for the relocation handler because
+ * all CPUs default to 0x30000 as SMBASE. */
if (params->num_concurrent_save_states > 1)
return -1;
@@ -296,17 +283,15 @@
if (params->handler == NULL)
return -1;
- /* Since the relocation handler always uses stack, adjust the number
- * of concurrent stack users to be CONFIG_MAX_CPUS. */
+ /* Since the relocation handler always uses stack, adjust the number of concurrent stack
+ * users to be CONFIG_MAX_CPUS. */
if (params->num_concurrent_stacks == 0)
params->num_concurrent_stacks = CONFIG_MAX_CPUS;
- return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
- params, fxsave_area_relocation);
+ return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params, fxsave_area_relocation);
}
-/* The SMM module is placed within the provided region in the following
- * manner:
+/* The SMM module is placed within the provided region in the following manner:
* +-----------------+ <- smram + size
* | BIOS resource |
* | list (STM) |
@@ -322,10 +307,9 @@
* | stub code |
* +-----------------+ <- smram
*
- * It should be noted that this algorithm will not work for
- * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
- * expects a region large enough to encompass the handler and stacks
- * as well as the SMM_DEFAULT_SIZE.
+ * It should be noted that this algorithm will not work for SMM_DEFAULT_SIZE SMRAM regions such
+ * as the A segment. This algorithm expects a region large enough to encompass the handler and
+ * stacks as well as the SMM_DEFAULT_SIZE.
*/
int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
{
@@ -350,8 +334,7 @@
if (CONFIG(DEBUG_SMI))
memset(smram, 0xcd, size);
- total_stack_size = params->per_cpu_stack_size *
- params->num_concurrent_stacks;
+ total_stack_size = params->per_cpu_stack_size * params->num_concurrent_stacks;
/* Stacks start at the top of the region. */
base = smram;
@@ -362,14 +345,13 @@
params->stack_top = base;
- /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment
- * taken into account. */
+ /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment taken into
+ * account. */
base = smram;
base += SMM_DEFAULT_SIZE;
handler_size = rmodule_memory_size(&smm_mod);
module_alignment = rmodule_load_alignment(&smm_mod);
- alignment_size = module_alignment -
- ((uintptr_t)base % module_alignment);
+ alignment_size = module_alignment - ((uintptr_t)base % module_alignment);
if (alignment_size != module_alignment) {
handler_size += alignment_size;
base += alignment_size;
--
To view, visit https://review.coreboot.org/c/coreboot/+/44310
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I0ef58342a183071b59d8c74a6220acd4a1ffb019
Gerrit-Change-Number: 44310
Gerrit-PatchSet: 1
Gerrit-Owner: HAOUAS Elyes <ehaouas(a)noos.fr>
Gerrit-MessageType: newchange
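The displacement arithmetic in the smm_place_jmp_instructions() hunk above is easier to follow outside the diff. Below is a minimal standalone sketch of the same idea; the struct layout and the cur -= stride step are assumptions based on the surrounding code, and the buffer and target addresses are hypothetical, so treat it as an illustration rather than the loader's exact implementation.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed layout of the 3-byte staggered entry instruction: jmp rel16. */
struct smm_entry_ins {
	uint8_t jmp_rel;	/* 0xe9 */
	uint16_t rel16;		/* 16-bit relative displacement */
} __attribute__((packed));

/*
 * Place 'num' jmp instructions, starting at the highest entry point and
 * stepping down by 'stride' bytes, all jumping to 'jmp_target'.
 */
static void place_jmp_sketch(void *entry_start, size_t stride, size_t num, void *jmp_target)
{
	char *cur = entry_start;
	struct smm_entry_ins entry = { .jmp_rel = 0xe9 };

	for (size_t i = 0; i < num; i++) {
		/* rel16 is measured from the end of the jmp instruction,
		 * so subtract both the current address and sizeof(entry). */
		uint32_t disp = (uintptr_t)jmp_target;
		disp -= sizeof(entry) + (uintptr_t)cur;

		entry.rel16 = disp;	/* keeps only the low 16 bits */
		memcpy(cur, &entry, sizeof(entry));
		cur -= stride;		/* next (lower) staggered entry point */
	}
}

Because every CPU has a different SMBASE, the effective IP of each staggered entry differs, which is why the displacement is recomputed on every iteration rather than copied.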
HAOUAS Elyes has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44314 )
Change subject: cpu/x86/{backup_default_smm,mp_init}.c: Convert to 96 characters line length
......................................................................
cpu/x86/{backup_default_smm,mp_init}.c: Convert to 96 characters line length
Change-Id: I8ba63f6fd032bc234766cce2fe4d66cb50aaf913
Signed-off-by: Elyes HAOUAS <ehaouas(a)noos.fr>
---
M src/cpu/x86/backup_default_smm.c
M src/cpu/x86/mp_init.c
2 files changed, 73 insertions(+), 96 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/14/44314/1
diff --git a/src/cpu/x86/backup_default_smm.c b/src/cpu/x86/backup_default_smm.c
index 1672e0b..080d993 100644
--- a/src/cpu/x86/backup_default_smm.c
+++ b/src/cpu/x86/backup_default_smm.c
@@ -15,9 +15,8 @@
return NULL;
/*
- * The buffer needs to be preallocated regardless. In the non-resume
- * path it will be allocated for handling resume. Note that cbmem_add()
- * does a find before the addition.
+ * The buffer needs to be preallocated regardless. In the non-resume path it will be
+ * allocated for handling resume. Note that cbmem_add() does a find before the addition.
*/
save_area = cbmem_add(CBMEM_ID_SMM_SAVE_SPACE, SMM_DEFAULT_SIZE);
@@ -33,8 +32,7 @@
}
/*
- * Not the S3 resume path. No need to restore memory contents after
- * SMM relocation.
+ * Not the S3 resume path. No need to restore memory contents after SMM relocation.
*/
return NULL;
}
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index caed8f4..1c2e641 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -39,17 +39,15 @@
static char processor_name[49];
/*
- * A mp_flight_record details a sequence of calls for the APs to perform
- * along with the BSP to coordinate sequencing. Each flight record either
- * provides a barrier for each AP before calling the callback or the APs
- * are allowed to perform the callback without waiting. Regardless, each
- * record has the cpus_entered field incremented for each record. When
- * the BSP observes that the cpus_entered matches the number of APs
- * the bsp_call is called with bsp_arg and upon returning releases the
- * barrier allowing the APs to make further progress.
+ * A mp_flight_record details a sequence of calls for the APs to perform along with the BSP to
+ * coordinate sequencing. Each flight record either provides a barrier for each AP before
+ * calling the callback or the APs are allowed to perform the callback without waiting.
+ * Regardless, each record has the cpus_entered field incremented for each record. When the BSP
+ * observes that the cpus_entered matches the number of APs the bsp_call is called with bsp_arg
+ * and upon returning releases the barrier allowing the APs to make further progress.
*
- * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
- * callback will just not be called.
+ * Note that ap_call() and bsp_call() can be NULL. In the NULL case the callback will just not
+ * be called.
*/
struct mp_flight_record {
atomic_t barrier;
@@ -72,8 +70,7 @@
#define MP_FR_NOBLOCK_APS(ap_func_, bsp_func_) \
_MP_FLIGHT_RECORD(1, ap_func_, bsp_func_)
-/* The mp_params structure provides the arguments to the mp subsystem
- * for bringing up APs. */
+/* The mp_params structure provides the arguments to the mp subsystem for bringing up APs. */
struct mp_params {
int num_cpus; /* Total cpus include BSP */
int parallel_microcode_load;
@@ -110,9 +107,9 @@
/* The sipi vector rmodule is included in the ramstage using 'objdump -B'. */
extern char _binary_sipi_vector_start[];
-/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is at the
- * memory range is already reserved so the OS cannot use it. That region is
- * free to use for AP bringup before SMM is initialized. */
+/* The SIPI vector is loaded at the SMM_DEFAULT_BASE. The reason is at the memory range is
+ * already reserved so the OS cannot use it. That region is free to use for AP bringup before
+ * SMM is initialized. */
static const uint32_t sipi_vector_location = SMM_DEFAULT_BASE;
static const int sipi_vector_location_size = SMM_DEFAULT_SIZE;
@@ -141,8 +138,7 @@
}
/* Returns 1 if timeout waiting for APs. 0 if target aps found. */
-static int wait_for_aps(atomic_t *val, int target, int total_delay,
- int delay_step)
+static int wait_for_aps(atomic_t *val, int target, int total_delay, int delay_step)
{
int timeout = 0;
int delayed = 0;
@@ -178,8 +174,7 @@
stop_this_cpu();
}
-/* By the time APs call ap_init() caching has been setup, and microcode has
- * been loaded. */
+/* By the time APs call ap_init() caching has been setup, and microcode has been loaded. */
static void asmlinkage ap_init(unsigned int cpu)
{
struct cpu_info *info;
@@ -197,8 +192,7 @@
/* Fix up APIC id with reality. */
info->cpu->path.apic.apic_id = lapicid();
- printk(BIOS_INFO, "AP: slot %d apic_id %x.\n", cpu,
- info->cpu->path.apic.apic_id);
+ printk(BIOS_INFO, "AP: slot %d apic_id %x.\n", cpu, info->cpu->path.apic.apic_id);
/* Walk the flight plan */
ap_do_flight_plan();
@@ -313,8 +307,8 @@
module_size = ALIGN_UP(module_size, 4);
if (module_size > loc_size) {
- printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n",
- module_size, loc_size);
+ printk(BIOS_CRIT, "SIPI module size (%d) > region size (%d).\n", module_size,
+ loc_size);
return ap_count;
}
@@ -363,8 +357,8 @@
max_cpus = p->num_cpus;
if (max_cpus > CONFIG_MAX_CPUS) {
- printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n",
- max_cpus, CONFIG_MAX_CPUS);
+ printk(BIOS_CRIT, "CPU count(%d) exceeds CONFIG_MAX_CPUS(%d)\n", max_cpus,
+ CONFIG_MAX_CPUS);
max_cpus = CONFIG_MAX_CPUS;
}
@@ -376,8 +370,8 @@
/* Build the CPU device path */
cpu_path.type = DEVICE_PATH_APIC;
- /* Assuming linear APIC space allocation. AP will set its own
- APIC id in the ap_init() path above. */
+ /* Assuming linear APIC space allocation. AP will set its own APIC id in the
+ * ap_init() path above. */
cpu_path.apic.apic_id = info->cpu->path.apic.apic_id + i;
/* Allocate the new CPU device structure */
@@ -425,8 +419,7 @@
sipi_vector = sipi_vector_location >> 12;
if (sipi_vector > max_vector_loc) {
- printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n",
- sipi_vector);
+ printk(BIOS_CRIT, "SIPI vector too large! 0x%08x\n", sipi_vector);
return -1;
}
@@ -443,8 +436,7 @@
/* Send INIT IPI to all but self. */
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
- lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
- LAPIC_DM_INIT);
+ lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT | LAPIC_DM_INIT);
printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
mdelay(10);
@@ -459,8 +451,8 @@
}
lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
- lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
- LAPIC_DM_STARTUP | sipi_vector);
+ lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT | LAPIC_DM_STARTUP |
+ sipi_vector);
printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
printk(BIOS_DEBUG, "timed out.\n");
@@ -510,9 +502,8 @@
int ret = 0;
/*
* Set time out for flight plan to a huge minimum value (>=1 second).
- * CPUs with many APs may take longer if there is contention for
- * resources such as UART, so scale the time out up by increments of
- * 100ms if needed.
+ * CPUs with many APs may take longer if there is contention for resources such as UART,
+ * so scale the time out up by increments of 100ms if needed.
*/
const int timeout_us = MAX(1000000, 100000 * mp_params->num_cpus);
const int step_us = 100;
@@ -527,8 +518,7 @@
/* Wait for APs if the record is not released. */
if (atomic_read(&rec->barrier) == 0) {
/* Wait for the APs to check in. */
- if (wait_for_aps(&rec->cpus_entered, num_aps,
- timeout_us, step_us)) {
+ if (wait_for_aps(&rec->cpus_entered, num_aps, timeout_us, step_us)) {
printk(BIOS_ERR, "MP record %d timeout.\n", i);
ret = -1;
}
@@ -540,8 +530,7 @@
release_barrier(&rec->barrier);
}
- printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__,
- stopwatch_duration_msecs(&sw));
+ printk(BIOS_INFO, "%s done after %ld msecs.\n", __func__, stopwatch_duration_msecs(&sw));
return ret;
}
@@ -574,21 +563,20 @@
}
/*
- * mp_init() will set up the SIPI vector and bring up the APs according to
- * mp_params. Each flight record will be executed according to the plan. Note
- * that the MP infrastructure uses SMM default area without saving it. It's
- * up to the chipset or mainboard to either e820 reserve this area or save this
- * region prior to calling mp_init() and restoring it after mp_init returns.
+ * mp_init() will set up the SIPI vector and bring up the APs according to mp_params. Each
+ * flight record will be executed according to the plan. Note that the MP infrastructure uses
+ * SMM default area without saving it. It's up to the chipset or mainboard to either e820
+ * reserve this area or save this region prior to calling mp_init() and restoring it after
+ * mp_init returns.
*
- * At the time mp_init() is called the MTRR MSRs are mirrored into APs then
- * caching is enabled before running the flight plan.
+ * At the time mp_init() is called the MTRR MSRs are mirrored into APs then caching is enabled
+ * before running the flight plan.
*
* The MP initialization has the following properties:
* 1. APs are brought up in parallel.
* 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
- * Therefore, one cannot rely on this property or the order of devices in
- * the device tree unless the chipset or mainboard know the APIC ids
- * a priori.
+ * Therefore, one cannot rely on this property or the order of devices in the device tree
+ * unless the chipset or mainboard know the APIC ids a priori.
*
* mp_init() returns < 0 on error, 0 on success.
*/
@@ -608,8 +596,7 @@
num_cpus = allocate_cpu_devices(cpu_bus, p);
if (num_cpus < p->num_cpus) {
- printk(BIOS_CRIT,
- "ERROR: More cpus requested (%d) than supported (%d).\n",
+ printk(BIOS_CRIT, "ERROR: More cpus requested (%d) than supported (%d).\n",
p->num_cpus, num_cpus);
return -1;
}
@@ -623,16 +610,16 @@
if (ap_count == NULL)
return -1;
- /* Make sure SIPI data hits RAM so the APs that come up will see
- * the startup code even if the caches are disabled. */
+ /* Make sure SIPI data hits RAM so the APs that come up will see the startup code even
+ * if the caches are disabled. */
wbinvd();
/* Start the APs providing number of APs and the cpus_entered field. */
global_num_aps = p->num_cpus - 1;
if (start_aps(cpu_bus, global_num_aps, ap_count) < 0) {
mdelay(1000);
- printk(BIOS_DEBUG, "%d/%d eventually checked in?\n",
- atomic_read(ap_count), global_num_aps);
+ printk(BIOS_DEBUG, "%d/%d eventually checked in?\n", atomic_read(ap_count),
+ global_num_aps);
return -1;
}
@@ -722,9 +709,9 @@
}
/*
- * The permanent handler runs with all cpus concurrently. Precalculate
- * the location of the new SMBASE. If using SMM modules then this
- * calculation needs to match that of the module loader.
+ * The permanent handler runs with all cpus concurrently. Precalculate the location of
+ * the new SMBASE. If using SMM modules then this calculation needs to match that of the
+ * module loader.
*/
perm_smbase = mp_state.perm_smbase;
perm_smbase -= cpu * runtime->save_state_size;
@@ -737,13 +724,10 @@
if (CONFIG(STM)) {
uintptr_t mseg;
- mseg = mp_state.perm_smbase +
- (mp_state.perm_smsize - CONFIG_MSEG_SIZE);
+ mseg = mp_state.perm_smbase + (mp_state.perm_smsize - CONFIG_MSEG_SIZE);
- stm_setup(mseg, p->cpu,
- perm_smbase,
- mp_state.perm_smbase,
- runtime->start32_offset);
+ stm_setup(mseg, p->cpu, perm_smbase, mp_state.perm_smbase,
+ runtime->start32_offset);
}
}
@@ -778,11 +762,11 @@
return 0;
}
-static int install_permanent_handler(int num_cpus, uintptr_t smbase,
- size_t smsize, size_t save_state_size)
+static int install_permanent_handler(int num_cpus, uintptr_t smbase, size_t smsize,
+ size_t save_state_size)
{
- /* There are num_cpus concurrent stacks and num_cpus concurrent save
- * state areas. Lastly, set the stack size to 1KiB. */
+ /* There are num_cpus concurrent stacks and num_cpus concurrent save state areas.
+ * Lastly, set the stack size to 1KiB. */
struct smm_loader_params smm_params = {
.per_cpu_stack_size = CONFIG_SMM_MODULE_STACK_SIZE,
.num_concurrent_stacks = num_cpus,
@@ -830,8 +814,8 @@
wbinvd();
/*
- * Indicate that the SMM handlers have been loaded and MP
- * initialization is about to start.
+ * Indicate that the SMM handlers have been loaded and MP initialization is about to
+ * start.
*/
if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
mp_state.ops.pre_mp_smm_init();
@@ -950,19 +934,20 @@
memcpy(&lcb, cb, sizeof(lcb));
mfence();
store_callback(per_cpu_slot, NULL);
- if (lcb.logical_cpu_number && (cur_cpu !=
- lcb.logical_cpu_number))
+ if (lcb.logical_cpu_number && (cur_cpu != lcb.logical_cpu_number))
continue;
else
lcb.func(lcb.arg);
}
}
-int mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num,
- long expire_us)
+int mp_run_on_aps(void (*func)(void *), void *arg, int logical_cpu_num, long expire_us)
{
- struct mp_callback lcb = { .func = func, .arg = arg,
- .logical_cpu_number = logical_cpu_num};
+ struct mp_callback lcb = {
+ .func = func,
+ .arg = arg,
+ .logical_cpu_number = logical_cpu_num
+ };
return run_ap_work(&lcb, expire_us);
}
@@ -983,17 +968,14 @@
stopwatch_init(&sw);
- ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS,
- 1000 * USECS_PER_MSEC);
+ ret = mp_run_on_aps(park_this_cpu, NULL, MP_RUN_ON_ALL_CPUS, 1000 * USECS_PER_MSEC);
duration_msecs = stopwatch_duration_msecs(&sw);
if (!ret)
- printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__,
- duration_msecs);
+ printk(BIOS_DEBUG, "%s done after %ld msecs.\n", __func__, duration_msecs);
else
- printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__,
- duration_msecs);
+ printk(BIOS_ERR, "%s failed after %ld msecs.\n", __func__, duration_msecs);
return ret;
}
@@ -1012,8 +994,7 @@
static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
{
/*
- * Make copy of the ops so that defaults can be set in the non-const
- * structure if needed.
+ * Make copy of the ops so that defaults can be set in the non-const structure if needed.
*/
memcpy(&state->ops, ops, sizeof(*ops));
@@ -1033,11 +1014,9 @@
}
/*
- * Default to smm_initiate_relocation() if trigger callback isn't
- * provided.
+ * Default to smm_initiate_relocation() if trigger callback isn't provided.
*/
- if (CONFIG(HAVE_SMI_HANDLER) &&
- ops->per_cpu_smm_trigger == NULL)
+ if (CONFIG(HAVE_SMI_HANDLER) && ops->per_cpu_smm_trigger == NULL)
mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
}
@@ -1061,7 +1040,7 @@
/* Sanity check SMM state. */
if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
- mp_state.ops.relocation_handler != NULL)
+ mp_state.ops.relocation_handler != NULL)
smm_enable();
if (is_smm_enabled())
@@ -1071,7 +1050,7 @@
/* Gather microcode information. */
if (mp_state.ops.get_microcode_info != NULL)
mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
- &mp_params.parallel_microcode_load);
+ &mp_params.parallel_microcode_load);
mp_params.flight_plan = &mp_steps[0];
mp_params.num_records = ARRAY_SIZE(mp_steps);
--
To view, visit https://review.coreboot.org/c/coreboot/+/44314
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I8ba63f6fd032bc234766cce2fe4d66cb50aaf913
Gerrit-Change-Number: 44314
Gerrit-PatchSet: 1
Gerrit-Owner: HAOUAS Elyes <ehaouas(a)noos.fr>
Gerrit-MessageType: newchange
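The reflowed mp_run_on_aps() prototype in the hunk above is easiest to read next to a call site. Here is a hypothetical usage sketch that reuses MP_RUN_ON_ALL_CPUS and the 1-second timeout pattern already present in this file; the callback, its caller and the exact header names are assumptions, not part of this change.

#include <cpu/x86/mp.h>
#include <timer.h>

/* Hypothetical per-AP callback; real code might program an MSR here. */
static void per_ap_work(void *arg)
{
	/* Placeholder for the actual per-CPU work. */
	(void)arg;
}

static int run_work_on_all_aps(void)
{
	/* MP_RUN_ON_ALL_CPUS runs the callback on every AP; expire_us bounds the wait. */
	return mp_run_on_aps(per_ap_work, NULL, MP_RUN_ON_ALL_CPUS,
			     1000 * USECS_PER_MSEC);
}

A return value of 0 means all selected APs ran the callback within the timeout, mirroring how mp_park_aps() in this file checks the result.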
9elements QA has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/44814 )
Change subject: Documentation: Add ASan documentation
......................................................................
Patch Set 8:
Automatic boot test returned (PASS/FAIL/TOTAL): 8/1/9
"QEMU x86 q35/ich9" (x86_32) using payload TianoCore : SUCCESS : https://lava.9esec.io/r/20184
"QEMU x86 q35/ich9" (x86_32) using payload SeaBIOS : SUCCESS : https://lava.9esec.io/r/20183
"QEMU x86 i440fx/piix4" (x86_64) using payload SeaBIOS : FAIL : https://lava.9esec.io/r/20182
"QEMU x86 i440fx/piix4" (x86_32) using payload SeaBIOS : SUCCESS : https://lava.9esec.io/r/20181
"QEMU AArch64" using payload LinuxBoot_u-root_kexec : SUCCESS : https://lava.9esec.io/r/20180
"HP Z220 SFF Workstation" (x86_32) using payload LinuxBoot_BusyBox_kexec : SUCCESS : https://lava.9esec.io/r/20188
"HP Z220 SFF Workstation" (x86_32) using payload LinuxBoot_BusyBox_kexec : SUCCESS : https://lava.9esec.io/r/20187
"HP Compaq 8200 Elite SFF PC" (x86_32) using payload TianoCore : SUCCESS : https://lava.9esec.io/r/20186
"HP Compaq 8200 Elite SFF PC" (x86_32) using payload SeaBIOS : SUCCESS : https://lava.9esec.io/r/20185
Please note: This test is under development and might not be accurate at all!
--
To view, visit https://review.coreboot.org/c/coreboot/+/44814
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I710ea495798597189941620c7e48fd5aa7476781
Gerrit-Change-Number: 44814
Gerrit-PatchSet: 8
Gerrit-Owner: Harshit Sharma <harshitsharmajs(a)gmail.com>
Gerrit-Reviewer: Arthur Heymans <arthur(a)aheymans.xyz>
Gerrit-Reviewer: Patrick Georgi <pgeorgi(a)google.com>
Gerrit-Reviewer: Paul Menzel <paulepanter(a)users.sourceforge.net>
Gerrit-Reviewer: Werner Zeh <werner.zeh(a)siemens.com>
Gerrit-Reviewer: build bot (Jenkins) <no-reply(a)coreboot.org>
Gerrit-CC: 9elements QA <hardwaretestrobot(a)gmail.com>
Gerrit-Comment-Date: Mon, 21 Sep 2020 09:59:12 +0000
Gerrit-HasComments: No
Gerrit-Has-Labels: No
Gerrit-MessageType: comment

Duncan Laurie has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/45346 )
Change subject: mb/google/volteer: fw_config: Add fields for keyboard features
......................................................................
mb/google/volteer: fw_config: Add fields for keyboard features
Add the newly defined fields for the presence of a keyboard backlight
and a number pad to the firmware configuration table.
We don't need to use these in coreboot (yet), but adding them keeps
the bit definitions in sync.
BUG=b:166707536
TEST=abuild -t google/volteer
Change-Id: I066e445f7d0be056e45737d2c538be1850ae85aa
Signed-off-by: Duncan Laurie <dlaurie(a)google.com>
---
M src/mainboard/google/volteer/variants/baseboard/devicetree.cb
1 file changed, 8 insertions(+), 0 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/46/45346/1
diff --git a/src/mainboard/google/volteer/variants/baseboard/devicetree.cb b/src/mainboard/google/volteer/variants/baseboard/devicetree.cb
index 582f44a..7b018b2 100644
--- a/src/mainboard/google/volteer/variants/baseboard/devicetree.cb
+++ b/src/mainboard/google/volteer/variants/baseboard/devicetree.cb
@@ -22,6 +22,14 @@
option LTE_ABSENT 0
option LTE_PRESENT 1
end
+ field KB_BL 14
+ option KB_BL_ABSENT 0
+ option KB_BL_PRESENT 1
+ end
+ field NUMPAD 15
+ option NUMPAD_ABSENT 0
+ option NUMPAD_PRESENT 1
+ end
field DB_SD 16 19
option SD_ABSENT 0
option SD_GL9755S 1
--
To view, visit https://review.coreboot.org/c/coreboot/+/45346
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I066e445f7d0be056e45737d2c538be1850ae85aa
Gerrit-Change-Number: 45346
Gerrit-PatchSet: 1
Gerrit-Owner: Duncan Laurie <dlaurie(a)chromium.org>
Gerrit-MessageType: newchange
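For context, fields like these can later be probed at runtime with coreboot's fw_config API. A minimal sketch follows, assuming the standard fw_config_probe()/FW_CONFIG() helpers and using the field and option names from the devicetree hunk above; the helper function itself is hypothetical and not part of this change.

#include <console/console.h>
#include <fw_config.h>

/* Hypothetical helper: log which keyboard features this unit was built with. */
static void report_keyboard_features(void)
{
	if (fw_config_probe(FW_CONFIG(KB_BL, KB_BL_PRESENT)))
		printk(BIOS_INFO, "fw_config: keyboard backlight present\n");

	if (fw_config_probe(FW_CONFIG(NUMPAD, NUMPAD_PRESENT)))
		printk(BIOS_INFO, "fw_config: keyboard number pad present\n");
}

Probing this way keeps board code in sync with the bit assignments declared in the devicetree, which is the point of landing the field definitions now.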
Angel Pons has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/45499 )
Change subject: nb/intel/sandybridge: Check ME status only once
......................................................................
nb/intel/sandybridge: Check ME status only once
The pre-RAM CBMEM console is tiny. Do not fill it with largely redundant
information when we could instead store more useful raminit debug logs.
Change-Id: I3a93fdeb67b0557e876f78b12241b70933ad324d
Signed-off-by: Angel Pons <th3fanbus(a)gmail.com>
---
M src/northbridge/intel/sandybridge/raminit.c
1 file changed, 0 insertions(+), 1 deletion(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/99/45499/1
diff --git a/src/northbridge/intel/sandybridge/raminit.c b/src/northbridge/intel/sandybridge/raminit.c
index 9ad8fd4..319fea3 100644
--- a/src/northbridge/intel/sandybridge/raminit.c
+++ b/src/northbridge/intel/sandybridge/raminit.c
@@ -453,7 +453,6 @@
/* Zone config */
dram_zones(&ctrl, 0);
- intel_early_me_status();
intel_early_me_init_done(ME_INIT_STATUS_SUCCESS);
intel_early_me_status();
--
To view, visit https://review.coreboot.org/c/coreboot/+/45499
To unsubscribe, or for help writing mail filters, visit https://review.coreboot.org/settings
Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: I3a93fdeb67b0557e876f78b12241b70933ad324d
Gerrit-Change-Number: 45499
Gerrit-PatchSet: 1
Gerrit-Owner: Angel Pons <th3fanbus(a)gmail.com>
Gerrit-Reviewer: Patrick Rudolph <siro(a)das-labor.org>
Gerrit-MessageType: newchange