This patch implements the TCG BIOS interrupt handler for int 1Ah (function AH = 0xBB). It is used, for example, by TrustedGRUB.
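For illustration, here is a minimal, hypothetical sketch (not part of this patch) of how a real-mode caller such as a boot loader might probe for the handler via TCG_StatusCheck. The register convention follows the TCG PC client specification; the 16-bit GCC inline-assembly wrapper is an assumption, not code from this patch:

#include <stdint.h>

/* Hypothetical caller-side probe: int 1Ah with AH=0xBB, AL=0x00
 * (TCG_StatusCheck); must execute in 16-bit real-mode code. */
static int tcg_bios_present(void)
{
    uint32_t eax, ebx;

    __asm__ __volatile__ ("int $0x1a"
                          : "=a" (eax), "=b" (ebx)
                          : "a" (0xbb00)
                          : "ecx", "edx", "esi", "edi", "cc", "memory");

    /* on success the handler returns EAX == 0 and puts the
     * 'TCPA' magic (TCG_MAGIC) into EBX */
    return eax == 0 && ebx == 0x41504354;
}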
This patch adds an implementation of SHA1 (following the NIST specification, IETF RFC 3174, and Wikipedia) to speed up measurements of code. TrustedGRUB, for example, makes use of this interface and measures (calculates the SHA1 of) the Linux kernel and initrd. Those files can be rather large, and feeding their bytes through the TIS interface as part of the interrupt handler commands invoked by TrustedGRUB takes quite some time due to the many vmexits the interface creates (one per byte).
There is also a threshold for the size of the data to hash (100 KiB): below it the TPM is used, above it the faster internal SHA1 implementation.
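As a sketch, the dispatch implemented by the sha1() function in this patch boils down to the following; tpm_sha1() is a placeholder name for the TPM_ORD_SHA1Start/Update/Complete sequence that the actual code drives through the TIS interface:

/* sketch only: choose between TPM and software SHA1 by input size */
#define SHA1_TPM_THRESHOLD (100 * 1024)

static u32 sha1(const u8 *data, u32 length, u8 *hash)
{
    if (length > SHA1_TPM_THRESHOLD)
        /* large buffers: software SHA1 avoids per-byte vmexits */
        return sha1_fast(data, length, hash);
    /* small buffers: let the TPM do the hashing */
    return tpm_sha1(data, length, hash);
}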
Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
---
 src/Kconfig   |    8
 src/clock.c   |    9
 src/stacks.c  |   14 +
 src/tcgbios.c |  714 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/tcgbios.h |    3
 5 files changed, 748 insertions(+)
Index: seabios/src/tcgbios.c
===================================================================
--- seabios.orig/src/tcgbios.c
+++ seabios/src/tcgbios.c
@@ -62,6 +62,9 @@ static const u8 GetCapability_OwnerAuth[
 #define RSDP_CAST(ptr) ((struct rsdp_descriptor *)ptr)
+static u32 sha1(const u8 *data, u32 length, u8 *hash);
+
+
 /* helper functions */
 static inline void *input_buf32(struct bregs *regs)
@@ -522,4 +525,715 @@ err_exit:
 }
+static int
+isValidPcpes(struct pcpes *pcpes)
+{
+    return (pcpes->eventtype != 0);
+}
+
+
+static u8 *
+get_lasa_last_ptr(u16 *entry_count, u8 **lasa_next)
+{
+    struct pcpes *pcpes;
+    u32 laml;
+    u8 *lasa_base = get_lasa_base_ptr(&laml);
+    u8 *lasa_last = NULL;
+    u8 *end = lasa_base + laml;
+    u32 size;
+
+    if (entry_count)
+        *entry_count = 0;
+
+    if (!lasa_base)
+        return NULL;
+
+    while (lasa_base < end) {
+        pcpes = (struct pcpes *)lasa_base;
+        if (!isValidPcpes(pcpes))
+            break;
+        if (entry_count)
+            (*entry_count)++;
+        size = pcpes->eventdatasize + offsetof(struct pcpes, event);
+        lasa_last = lasa_base;
+        lasa_base += size;
+    }
+
+    if (lasa_next)
+        *lasa_next = lasa_base;
+
+    return lasa_last;
+}
+
+
+/*******************************************************************
+  Calculation of SHA1 in SW
+
+  See: http://www.itl.nist.gov/fipspubs/fip180-1.htm
+  RFC 3174, Wikipedia's SHA1 algorithm description
+ ******************************************************************/
+typedef struct _sha1_ctx {
+    u32 h[5];
+} sha1_ctx;
+
+
+static inline u32 rol(u32 val, u16 rol)
+{
+    u32 res;
+
+    __asm__ __volatile__ ("rol %%cl, %%eax"
+                          : "=a" (res)
+                          : "a" (val), "c" (rol));
+
+    return res;
+}
+
+
+static inline u64 bswap_64(u64 val)
+{
+    u32 hi = (u32)(val >> 32);
+    u32 lo = (u32)val;
+
+    __asm__ __volatile__ ("bswap %%eax"
+                          : "=a" (lo)
+                          : "a" (lo));
+
+    __asm__ __volatile__ ("bswap %%eax"
+                          : "=a" (hi)
+                          : "a" (hi));
+
+    return ((u64)lo << 32 | hi);
+}
+
+
+static const u32 sha_ko[4] = { 0x5a827999,
+                               0x6ed9eba1,
+                               0x8f1bbcdc,
+                               0xca62c1d6 };
+
+
+static void
+sha1_block(u32 *w, sha1_ctx *ctx)
+{
+    u32 i;
+    u32 a, b, c, d, e, f;
+    u32 tmp;
+    u32 idx;
+
+    /* change endianness of given data */
+    for (i = 0; i < 16; i++)
+        w[i] = htonl(w[i]);
+
+    for (i = 16; i <= 79; i++) {
+        tmp = w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16];
+        w[i] = rol(tmp, 1);
+    }
+
+    a = ctx->h[0];
+    b = ctx->h[1];
+    c = ctx->h[2];
+    d = ctx->h[3];
+    e = ctx->h[4];
+
+    for (i = 0; i <= 79; i++) {
+        if (i <= 19) {
+            f = (b & c) | ((b ^ 0xffffffff) & d);
+            idx = 0;
+        } else if (i <= 39) {
+            f = b ^ c ^ d;
+            idx = 1;
+        } else if (i <= 59) {
+            f = (b & c) | (b & d) | (c & d);
+            idx = 2;
+        } else {
+            f = b ^ c ^ d;
+            idx = 3;
+        }
+
+        tmp = rol(a, 5) +
+              f +
+              e +
+              sha_ko[idx] +
+              w[i];
+        e = d;
+        d = c;
+        c = rol(b, 30);
+        b = a;
+        a = tmp;
+    }
+
+    ctx->h[0] += a;
+    ctx->h[1] += b;
+    ctx->h[2] += c;
+    ctx->h[3] += d;
+    ctx->h[4] += e;
+}
+
+
+static void
+sha1_do(sha1_ctx *ctx, const u8 *data32, u32 length)
+{
+    u32 offset;
+    u16 num;
+    u64 bits = 0; /* u64 so inputs of 512 MiB or more don't overflow */
+    u32 w[80];
+    u64 tmp;
+
+    /* process data in 64-byte chunks */
+    for (offset = 0; length - offset >= 64; offset += 64) {
+        memcpy(w, data32 + offset, 64);
+        sha1_block((u32 *)w, ctx);
+        bits += (64 * 8);
+    }
+
+    /* last block with less than 64 bytes */
+    num = length - offset;
+    bits += (num << 3);
+
+    memcpy(w, data32 + offset, num);
+    ((u8 *)w)[num] = 0x80;
+    if (64 - (num + 1) > 0)
+        memset(&((u8 *)w)[num + 1], 0x0, 64 - (num + 1));
+
+    if (num >= 56) {
+        /* cannot append number of bits here */
+        sha1_block((u32 *)w, ctx);
+        memset(w, 0x0, 60);
+    }
+
+    /* write number of bits to end of block */
+    tmp = bswap_64(bits);
+    memcpy(&w[14], &tmp, 8);
+
+    sha1_block(w, ctx);
+
+    /* need to switch result's endianness */
+    for (num = 0; num < 5; num++)
+        ctx->h[num] = htonl(ctx->h[num]);
+}
+
+
+static u32
+sha1_fast(const u8 *data, u32 length, u8 *hash)
+{
+    sha1_ctx ctx = {
+        .h[0] = 0x67452301,
+        .h[1] = 0xefcdab89,
+        .h[2] = 0x98badcfe,
+        .h[3] = 0x10325476,
+        .h[4] = 0xc3d2e1f0,
+    };
+
+    sha1_do(&ctx, data, length);
+    memcpy(hash, &ctx.h[0], 20);
+
+    return 0;
+}
+
+
+#if CONFIG_TPM_FOR_SHA1
+
+static u32
+sha1(const u8 *data, u32 length, u8 *hash)
+{
+    u32 rc;
+    u32 returnCode;
+    struct tpm_res_sha1start start;
+    struct tpm_res_sha1complete complete;
+    u32 blocks = length / 64;
+    u32 rest = length % 64;
+    u32 numbytes, numbytes_no;
+    u32 offset = 0;
+
+    if (length > 100 * 1024)
+        return sha1_fast(data, length, hash);
+
+    rc = build_and_send_cmd(TPM_ORD_SHA1Start,
+                            NULL, 0,
+                            (u8 *)&start,
+                            sizeof(struct tpm_res_sha1start),
+                            &returnCode);
+
+    if (rc || returnCode)
+        goto err_exit;
+
+    while (blocks > 0) {
+
+        numbytes = ntohl(start.max_num_bytes);
+        if (numbytes > blocks * 64)
+            numbytes = blocks * 64;
+
+        numbytes_no = htonl(numbytes);
+
+        rc = build_and_send_cmd_od(TPM_ORD_SHA1Update,
+                                   (u8 *)&numbytes_no, sizeof(numbytes_no),
+                                   NULL, 0, &returnCode,
+                                   &data[offset], numbytes);
+
+        if (rc || returnCode)
+            goto err_exit;
+
+        offset += numbytes;
+        blocks -= (numbytes / 64);
+    }
+
+    numbytes_no = htonl(rest);
+
+    rc = build_and_send_cmd_od(TPM_ORD_SHA1Complete,
+                               (u8 *)&numbytes_no, sizeof(numbytes_no),
+                               (u8 *)&complete,
+                               sizeof(struct tpm_res_sha1complete),
+                               &returnCode,
+                               &data[offset], rest);
+
+    if (rc || returnCode)
+        goto err_exit;
+
+    memcpy(hash, complete.hash, sizeof(complete.hash));
+
+    return 0;
+
+err_exit:
+#ifdef DEBUG_TCGBIOS
+    dprintf(1, "TCGBIOS: TPM SHA1 malfunctioning.\n");
+#endif
+    tcpa_state.tpm_working = 0;
+    if (rc)
+        return rc;
+    return TCG_TCG_COMMAND_ERROR;
+}
+
+#else
+
+static u32
+sha1(const u8 *data, u32 length, u8 *hash)
+{
+    return sha1_fast(data, length, hash);
+}
+
+#endif
+
+
+/*
+ * Extend the ACPI log with the given entry by copying the
+ * entry data into the log.
+ *
+ * Input:
+ *  Pointer to the structure to be copied into the log
+ *
+ * Output:
+ *  lower 16 bits of return code contain entry number;
+ *  if entry number is '0', then upper 16 bits contain error code.
+ */
+static u32
+tcpa_extend_acpi_log(void *entry_ptr, u16 *entry_count)
+{
+    u32 laml, size;
+    u8 *lasa_base = get_lasa_base_ptr(&laml), *lasa_next;
+    struct pcpes *pcpes = (struct pcpes *)entry_ptr;
+
+    get_lasa_last_ptr(entry_count, &lasa_next);
+
+#ifdef DEBUG_TCGBIOS
+    dprintf(1, "TCGBIOS: LASA_BASE = %p, LASA_NEXT = %p\n",
+            lasa_base, lasa_next);
+#endif
+
+    if (lasa_next == NULL || laml == 0)
+        return TCG_PC_LOGOVERFLOW;
+
+    size = pcpes->eventdatasize + offsetof(struct pcpes, event);
+
+    if ((lasa_next + size - lasa_base) > laml) {
+#ifdef DEBUG_TCGBIOS
+        dprintf(1, "TCGBIOS: LOG OVERFLOW: size = %d\n", size);
+#endif
+        return TCG_PC_LOGOVERFLOW;
+    }
+
+    memcpy(lasa_next, entry_ptr, size);
+
+    (*entry_count)++;
+
+    return 0;
+}
+
+
+static u32
+is_preboot_if_shutdown(void)
+{
+    return tcpa_state.if_shutdown;
+}
+
+
+static u32
+shutdown_preboot_interface(void)
+{
+    u32 rc = 0;
+
+    if (!is_preboot_if_shutdown()) {
+        tcpa_state.if_shutdown = 1;
+    } else {
+        rc = TCG_INTERFACE_SHUTDOWN;
+    }
+
+    return rc;
+}
+
+
+static void
+tcpa_shutdown(void)
+{
+    reset_acpi_log();
+    shutdown_preboot_interface();
+}
+
+
+static u32
+pass_through_to_tpm(struct pttti *pttti, struct pttto *pttto)
+{
+    u32 rc = 0;
+    u32 resbuflen = 0;
+    struct tpm_req_header *trh;
+    u8 locty = 0;
+    struct iovec iovec[2];
+    const u32 *tmp;
+
+    if (is_preboot_if_shutdown()) {
+        rc = TCG_INTERFACE_SHUTDOWN;
+        goto err_exit;
+    }
+
+    trh = (struct tpm_req_header *)pttti->tpmopin;
+
+    if (pttti->ipblength < sizeof(struct pttti) + TPM_REQ_HEADER_SIZE ||
+        pttti->opblength < sizeof(struct pttto) ||
+        ntohl(trh->totlen) + sizeof(struct pttti) > pttti->ipblength) {
+        rc = TCG_INVALID_INPUT_PARA;
+        goto err_exit;
+    }
+
+    resbuflen = pttti->opblength - offsetof(struct pttto, tpmopout);
+
+    /* request length is the big-endian totlen field of the TPM header */
+    iovec[0].data = pttti->tpmopin;
+    tmp = (const u32 *)&((u8 *)iovec[0].data)[2];
+    iovec[0].length = ntohl(*tmp);
+
+    iovec[1].data = NULL;
+    iovec[1].length = 0;
+
+    rc = transmit(locty, iovec, pttto->tpmopout, &resbuflen);
+    if (rc)
+        goto err_exit;
+
+    pttto->opblength = offsetof(struct pttto, tpmopout) + resbuflen;
+    pttto->reserved = 0;
+
+err_exit:
+    if (rc != 0) {
+        pttto->opblength = 4;
+        pttto->reserved = 0;
+    }
+
+    return rc;
+}
+
+
+static u32
+tpm_extend(u8 *hash, u32 pcrindex)
+{
+    u32 rc;
+    struct pttto_extend pttto;
+    struct pttti_extend pttti = {
+        .pttti = {
+            .ipblength = sizeof(struct pttti_extend),
+            .opblength = sizeof(struct pttto_extend),
+        },
+        .req = {
+            .tag      = htons(0xc1),
+            .totlen   = htonl(sizeof(pttti.req)),
+            .ordinal  = htonl(TPM_ORD_Extend),
+            .pcrindex = htonl(pcrindex),
+        },
+    };
+
+    memcpy(pttti.req.digest, hash, sizeof(pttti.req.digest));
+
+    rc = pass_through_to_tpm(&pttti.pttti, &pttto.pttto);
+
+    if (rc == 0) {
+        if (pttto.pttto.opblength < TPM_RSP_HEADER_SIZE ||
+            pttto.pttto.opblength !=
+                sizeof(struct pttto) + ntohl(pttto.rsp.totlen) ||
+            ntohs(pttto.rsp.tag) != 0xc4) {
+            rc = TCG_FATAL_COM_ERROR;
+        }
+    }
+
+    if (rc)
+        tcpa_shutdown();
+
+    return rc;
+}
+
+
+static u32
+hash_all(const struct hai *hai, u8 *hash)
+{
+    if (is_preboot_if_shutdown() != 0)
+        return TCG_INTERFACE_SHUTDOWN;
+
+    if (hai->ipblength != sizeof(struct hai) ||
+        hai->hashdataptr == 0 ||
+        hai->hashdatalen == 0 ||
+        hai->algorithmid != TPM_ALG_SHA)
+        return TCG_INVALID_INPUT_PARA;
+
+    return sha1((const u8 *)hai->hashdataptr, hai->hashdatalen, hash);
+}
+
+
+static u32
+hash_log_event(const struct hlei *hlei, struct hleo *hleo)
+{
+    u32 rc = 0;
+    u16 size;
+    struct pcpes *pcpes;
+    u16 entry_count;
+
+    if (is_preboot_if_shutdown() != 0) {
+        rc = TCG_INTERFACE_SHUTDOWN;
+        goto err_exit;
+    }
+
+    size = hlei->ipblength;
+    if (size != sizeof(*hlei)) {
+        rc = TCG_INVALID_INPUT_PARA;
+        goto err_exit;
+    }
+
+    pcpes = (struct pcpes *)hlei->logdataptr;
+
+    if (pcpes->pcrindex >= 24 ||
+        pcpes->pcrindex != hlei->pcrindex ||
+        pcpes->eventtype != hlei->logeventtype) {
+        rc = TCG_INVALID_INPUT_PARA;
+        goto err_exit;
+    }
+
+    if ((hlei->hashdataptr != 0) && (hlei->hashdatalen != 0)) {
+        rc = sha1((const u8 *)hlei->hashdataptr,
+                  hlei->hashdatalen, pcpes->digest);
+        if (rc)
+            goto err_exit;
+    }
+
+    rc = tcpa_extend_acpi_log((void *)hlei->logdataptr, &entry_count);
+    if (rc)
+        goto err_exit;
+
+    /* updating the log was fine */
+    hleo->opblength = sizeof(struct hleo);
+    hleo->reserved = 0;
+    hleo->eventnumber = entry_count;
+
+err_exit:
+    if (rc != 0) {
+        hleo->opblength = 2;
+        hleo->reserved = 0;
+    }
+
+    return rc;
+}
+
+
+static u32
+hash_log_extend_event(const struct hleei_short *hleei_s, struct hleeo *hleeo)
+{
+    u32 rc = 0;
+    struct hleo hleo;
+    struct hleei_long *hleei_l = (struct hleei_long *)hleei_s;
+    const void *logdataptr;
+    u32 logdatalen;
+    struct pcpes *pcpes;
+
+    /* short or long version? */
+    switch (hleei_s->ipblength) {
+    case sizeof(struct hleei_short):
+        /* short */
+        logdataptr = hleei_s->logdataptr;
+        logdatalen = hleei_s->logdatalen;
+        break;
+
+    case sizeof(struct hleei_long):
+        /* long */
+        logdataptr = hleei_l->logdataptr;
+        logdatalen = hleei_l->logdatalen;
+        break;
+
+    default:
+        /* bad input block */
+        rc = TCG_INVALID_INPUT_PARA;
+        goto err_exit;
+    }
+
+    pcpes = (struct pcpes *)logdataptr;
+
+    struct hlei hlei = {
+        .ipblength    = sizeof(hlei),
+        .hashdataptr  = hleei_s->hashdataptr,
+        .hashdatalen  = hleei_s->hashdatalen,
+        .pcrindex     = hleei_s->pcrindex,
+        .logeventtype = pcpes->eventtype,
+        .logdataptr   = logdataptr,
+        .logdatalen   = logdatalen,
+    };
+
+    rc = hash_log_event(&hlei, &hleo);
+    if (rc)
+        goto err_exit;
+
+    hleeo->opblength = sizeof(struct hleeo);
+    hleeo->reserved = 0;
+    hleeo->eventnumber = hleo.eventnumber;
+
+    rc = tpm_extend(pcpes->digest, hleei_s->pcrindex);
+
+err_exit:
+    if (rc != 0) {
+        hleeo->opblength = 4;
+        hleeo->reserved = 0;
+    }
+
+    return rc;
+}
+
+
+static u32
+tss(struct ti *ti, struct to *to)
+{
+    u32 rc = 0;
+
+    if (is_preboot_if_shutdown() == 0) {
+        rc = TCG_PC_UNSUPPORTED;
+    } else {
+        rc = TCG_INTERFACE_SHUTDOWN;
+    }
+
+    to->opblength = sizeof(struct to);
+    to->reserved = 0;
+
+    return rc;
+}
+
+
+static u32
+compact_hash_log_extend_event(u8 *buffer,
+                              u32 info,
+                              u32 length,
+                              u32 pcrindex,
+                              u32 *edx_ptr)
+{
+    u32 rc = 0;
+    struct hleeo hleeo;
+    struct pcpes pcpes = {
+        .pcrindex      = pcrindex,
+        .eventtype     = EV_COMPACT_HASH,
+        .eventdatasize = sizeof(info),
+        .event         = info,
+    };
+    struct hleei_short hleei = {
+        .ipblength   = sizeof(hleei),
+        .hashdataptr = buffer,
+        .hashdatalen = length,
+        .pcrindex    = pcrindex,
+        .logdataptr  = &pcpes,
+        .logdatalen  = sizeof(pcpes),
+    };
+
+    rc = hash_log_extend_event(&hleei, &hleeo);
+    if (rc == 0)
+        *edx_ptr = hleeo.eventnumber;
+
+    return rc;
+}
+
+
+void VISIBLE32FLAT
+tcpa_interrupt_handler32(struct bregs *regs)
+{
+    switch ((enum irq_ids)regs->al) {
+    case TCG_StatusCheck:
+        if (is_tpm_present() == 0) {
+            /* no TPM available */
+            regs->eax = TCG_PC_TPM_NOT_PRESENT;
+        } else {
+            regs->eax = 0;
+            regs->ebx = TCG_MAGIC;
+            regs->ch = TCG_VERSION_MAJOR;
+            regs->cl = TCG_VERSION_MINOR;
+            regs->edx = 0x0;
+            regs->esi = (u32)get_lasa_base_ptr(NULL);
+            regs->edi = (u32)get_lasa_last_ptr(NULL, NULL);
+            set_cf(regs, 0);
+        }
+        break;
+
+    case TCG_HashLogExtendEvent:
+        regs->eax =
+            hash_log_extend_event(
+                (struct hleei_short *)input_buf32(regs),
+                (struct hleeo *)output_buf32(regs));
+        set_cf(regs, 0);
+        break;
+
+    case TCG_PassThroughToTPM:
+        regs->eax =
+            pass_through_to_tpm((struct pttti *)input_buf32(regs),
+                                (struct pttto *)output_buf32(regs));
+        set_cf(regs, 0);
+        break;
+
+    case TCG_ShutdownPreBootInterface:
+        regs->eax = shutdown_preboot_interface();
+        set_cf(regs, 0);
+        break;
+
+    case TCG_HashLogEvent:
+        regs->eax = hash_log_event((struct hlei *)input_buf32(regs),
+                                   (struct hleo *)output_buf32(regs));
+        set_cf(regs, 0);
+        break;
+
+    case TCG_HashAll:
+        regs->eax =
+            hash_all((struct hai *)input_buf32(regs),
+                     (u8 *)output_buf32(regs));
+        set_cf(regs, 0);
+        break;
+
+    case TCG_TSS:
+        regs->eax = tss((struct ti *)input_buf32(regs),
+                        (struct to *)output_buf32(regs));
+        set_cf(regs, 0);
+        break;
+
+    case TCG_CompactHashLogExtendEvent:
+        regs->eax =
+            compact_hash_log_extend_event((u8 *)input_buf32(regs),
+                                          regs->esi,
+                                          regs->ecx,
+                                          regs->edx,
+                                          &regs->edx);
+        set_cf(regs, 0);
+        break;
+
+    default:
+        set_cf(regs, 1);
+    }
+
+    return;
+}
+
+
 #endif /* CONFIG_TCGBIOS */
Index: seabios/src/stacks.c
===================================================================
--- seabios.orig/src/stacks.c
+++ seabios/src/stacks.c
@@ -7,6 +7,7 @@
 #include "biosvar.h" // get_ebda_seg
 #include "util.h" // dprintf
 #include "bregs.h" // CR0_PE
+#include "tcgbios.h"
 // Thread info - stored at bottom of each thread stack - don't change
 // without also updating the inline assembler below.
@@ -393,3 +394,16 @@ check_preempt(void)
     extern void _cfunc32flat_yield_preempt(void);
     call32(_cfunc32flat_yield_preempt, 0, 0);
 }
+
+
+#ifdef CONFIG_TCGBIOS
+void tcpa_interrupt_handler16(struct bregs *regs)
+{
+    if (MODESEGMENT) {
+        dprintf(3, "16: Calling tcpa_interrupt_handler\n");
+        call32(_cfunc32flat_tcpa_interrupt_handler32, (u32)regs, 0);
+    } else {
+        _cfunc32flat_tcpa_interrupt_handler32(regs);
+    }
+}
+#endif /* CONFIG_TCGBIOS */
Index: seabios/src/clock.c
===================================================================
--- seabios.orig/src/clock.c
+++ seabios/src/clock.c
@@ -434,6 +434,14 @@ handle_1a07(struct bregs *regs)
     set_success(regs);
 }
+static void
+handle_1abb(struct bregs *regs)
+{
+#if CONFIG_TCGBIOS
+    tcpa_interrupt_handler16(regs);
+#endif
+}
+
 // Unsupported
 static void
 handle_1aXX(struct bregs *regs)
@@ -456,6 +464,7 @@ handle_1a(struct bregs *regs)
     case 0x06: handle_1a06(regs); break;
     case 0x07: handle_1a07(regs); break;
     case 0xb1: handle_1ab1(regs); break;
+    case 0xbb: handle_1abb(regs); break;
     default:   handle_1aXX(regs); break;
     }
 }
Index: seabios/src/tcgbios.h
===================================================================
--- seabios.orig/src/tcgbios.h
+++ seabios/src/tcgbios.h
@@ -359,6 +359,9 @@ enum ipltype {
     IPL_EL_TORITO_2
 };
+void tcpa_interrupt_handler32(struct bregs *regs);
+void _cfunc32flat_tcpa_interrupt_handler32(struct bregs *regs);
+
 #if CONFIG_TCGBIOS
 void tcpa_acpi_init(void);
 int has_working_tpm(void);
Index: seabios/src/Kconfig
===================================================================
--- seabios.orig/src/Kconfig
+++ seabios/src/Kconfig
@@ -322,6 +322,14 @@ menu "BIOS interfaces"
         help
             Provide TPM support along with TCG BIOS extensions
+    config TPM_FOR_SHA1
+        depends on TCGBIOS
+        bool "Use the TPM for SHA1 calculations"
+        default y
+        help
+            If enabled, SHA1 calculations for small amounts of data
+            are done by the TPM; larger data (and, if disabled, all
+            data) are hashed with the faster internal SHA1
+            implementation.
+
 endmenu
menu "BIOS Tables"