Angel Pons has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/44880 )
Change subject: security/intel/txt: Improve MTRR setup for GETSEC[ENTERACCS] ......................................................................
security/intel/txt: Improve MTRR setup for GETSEC[ENTERACCS]
The BIOS ACM will check that enabled variable MTRRs do not cover more than the ACM's size, rounded up to 4 KiB. If that is not the case, launching the ACM will result in a lovely TXT reset. How boring.
The new algorithm simply performs a reverse bit scan in a loop, and allocates one MTRR for each set bit in the rounded-up size to cache. Before allocating anything, it checks if there are enough variable MTRRs; if not, it will refuse to cache anything. This will result in another TXT reset, initiated by the processor, with error type 5:
Load memory type error in Authenticated Code Execution Area.
This can only happen if the ACM has different caching requirements that the current code does not know about, or something has been compromised. Therefore, causing a TXT reset should be a reasonable enough approach.
Also, disable all MTRRs before clearing the variable MTRRs and only enable them again once they have been set up with the new values.
Tested on Asrock B85M Pro4 with a BIOS ACM whose size is 101504 bytes. Without this patch, launching the ACM would result in a TXT reset. This no longer happens when this patch is applied.
Change-Id: I8d411f6450928357544be20250262c2005d1e75d Signed-off-by: Angel Pons <th3fanbus@gmail.com> --- M src/security/intel/txt/getsec_enteraccs.S 1 file changed, 94 insertions(+), 18 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/80/44880/1
diff --git a/src/security/intel/txt/getsec_enteraccs.S b/src/security/intel/txt/getsec_enteraccs.S index 3135de7..b78ab8a 100644 --- a/src/security/intel/txt/getsec_enteraccs.S +++ b/src/security/intel/txt/getsec_enteraccs.S @@ -107,10 +107,10 @@ orl $(CR0_CD | CR0_NW), %eax movl %eax, %cr0
- /* Disable fixed MTRRs */ + /* Disable all MTRRs */ movl $(MTRR_DEF_TYPE_MSR), %ecx - rdmsr - andl $(~MTRR_DEF_TYPE_FIX_EN), %eax + xorl %eax, %eax + xorl %edx, %edx wrmsr
/* @@ -164,24 +164,100 @@ * Chapter A.1.1 * Intel TXT Software Development Guide (Document: 315168-015) */ - movl $(MTRR_PHYS_BASE(0)), %ecx - movl 12(%ebp), %eax /* %eax = acmbase */ - orl $(6), %eax /* MTRR_TYPE_WB */ - movl $0, %edx - wrmsr
- /* Round acmsize to next power of two. Required for MTRR programming. */ + /* + * Important note: The MTRRs must cache less than a page (4 KiB) + * of unused memory after the BIOS ACM. Failure to do so will + * result in a TXT reset with Class Code 5, Major Error Code 2. + */ + + /* Determine size of AC module */ + movl 12(%ebp), %eax /* %eax = acmbase */ movl $1, %ebx - movl 16(%ebp), %ecx /* %ebx = acmsize */ - dec %ecx - bsr %ecx, %ecx /* find MSB */ - inc %ecx - shl %cl, %ebx - movl $(MTRR_PHYS_MASK(0)), %ecx - xorl %eax, %eax - subl %ebx, %eax /* %eax = 4GIB - log2_ceil(ACM SIZE) */ - orl $((1 << 11)), %eax /* MTRR_PHYS_MASK_VALID */ + movl 16(%ebp), %ebx /* %ebx = acmsize */ + + /* Round up to page size */ + addl $(0xfff), %ebx + andl $(~0xfff), %ebx /* Aligned to a page (4 KiB) */ + + /* Use XMM to store local variables */ + movd %eax, %xmm0 /* XMM0: Base address of next MTRR */ + movd %ebx, %xmm1 /* XMM1: Remaining size to cache */ + + /* Get the number of variable MTRRs */ + movl $(MTRR_CAP_MSR), %ecx + rdmsr + andl $(0xff), %eax + + /* Initialize ECX */ + movl $(MTRR_PHYS_BASE(0)), %ecx + + /* Check if there are enough variable MTRRs to cache this size */ + popcnt %ebx, %edx + cmp %eax, %edx + + jbe cond_allocate_var_mtrrs + + /* + * There aren't enough MTRRs to handle this size. + * The ACM will be uncached, and will invoke TXT reset. + * + * FIXME: Do proper error handling? 
+ */ + xorl %ebx, %ebx + movd %ebx, %xmm1 + + jmp cond_allocate_var_mtrrs + +body_allocate_var_mtrrs: + + /* Program MTRR base */ + xorl %edx, %edx + movd %xmm0, %eax + orl $(MTRR_TYPE_WRBACK), %eax + wrmsr + incl %ecx /* MTRR_PHYS_MASK */ + + /* Temporarily transfer MSR index to EDX so that CL can be used */ + movl %ecx, %edx + + /* Determine next size to cache */ + bsr %ebx, %ecx + movl $(1), %ebx + shl %cl, %ebx /* Can only use CL here */ + + /* Restore ECX */ + movl %edx, %ecx + + /* Update saved base address */ + addl %ebx, %eax + movd %eax, %xmm0 + + /* Update saved remaining size */ + movd %xmm1, %eax + subl %ebx, %eax + movd %eax, %xmm1 + + /* Program MTRR mask */ movl MTRR_HIGH_MASK, %edx + xorl %eax, %eax + subl %ebx, %eax /* %eax = 4GIB - size to cache */ + orl $((1 << 11)), %eax /* MTRR_PHYS_MASK_VALID */ + wrmsr + incl %ecx /* Next MTRR_PHYS_BASE */ + +cond_allocate_var_mtrrs: + + /* Check if we still need to cache something */ + movd %xmm1, %ebx + andl %ebx, %ebx + + jnz body_allocate_var_mtrrs + + /* Enable variable MTRRs */ + movl $(MTRR_DEF_TYPE_MSR), %ecx + rdmsr + orl $(MTRR_DEF_TYPE_EN), %eax wrmsr
/* Enable cache - GPF# if not done */