Attention is currently required from: Julius Werner.

Martin L Roth has uploaded this change for review.


arch/arm64: Reformat C files with clang-format v16

Reformat only; no functional changes.

Change-Id: I4543a416d16d2689d7962e9bc69de4ba703495fb
Signed-off-by: Martin Roth <gaumless@gmail.com>
---
M src/arch/arm64/armv8/cache.c
M src/arch/arm64/armv8/exception.c
M src/arch/arm64/armv8/mmu.c
M src/arch/arm64/bl31.c
M src/arch/arm64/eabi_compat.c
M src/arch/arm64/fit_payload.c
M src/arch/arm64/romstage.c
M src/arch/arm64/tables.c
M src/arch/arm64/transition.c
9 files changed, 81 insertions(+), 112 deletions(-)

git pull ssh://review.coreboot.org:29418/coreboot refs/changes/09/80009/1
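
For anyone wanting to reproduce or verify a reformat-only change like this one, a rough sketch of the workflow (the exact invocation is not recorded in the change; the clang-format-16 binary name and the use of the tree's .clang-format style file are assumptions):

    cd coreboot
    # Reformat the touched files in place, picking up the repo's .clang-format rules.
    clang-format-16 -i --style=file \
        src/arch/arm64/armv8/cache.c src/arch/arm64/armv8/exception.c \
        src/arch/arm64/armv8/mmu.c src/arch/arm64/bl31.c \
        src/arch/arm64/eabi_compat.c src/arch/arm64/fit_payload.c \
        src/arch/arm64/romstage.c src/arch/arm64/tables.c \
        src/arch/arm64/transition.c
    # On a tree that already carries this patch set, the command above should
    # produce no further changes:
    git diff --exit-code
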
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
index dbaedec..1a2d632 100644
--- a/src/arch/arm64/armv8/cache.c
+++ b/src/arch/arm64/armv8/cache.c
@@ -46,7 +46,8 @@
}
}

-void cpu_get_cache_info(enum cache_level level, enum cache_type type, size_t *cache_size, size_t *assoc)
+void cpu_get_cache_info(enum cache_level level, enum cache_type type, size_t *cache_size,
+ size_t *assoc)
{
uint64_t ccsidr_el1;

@@ -100,7 +101,7 @@
* coherency in drivers which do DMA transfers and only need to perform
* cache maintenance on a particular memory range rather than the entire cache.
*/
-static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
+static void dcache_op_va(const void *addr, size_t len, enum dcache_op op)
{
uint64_t line, linesize;

@@ -127,17 +128,17 @@
isb();
}

-void dcache_clean_by_mva(void const *addr, size_t len)
+void dcache_clean_by_mva(const void *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCCVAC);
}

-void dcache_clean_invalidate_by_mva(void const *addr, size_t len)
+void dcache_clean_invalidate_by_mva(const void *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCCIVAC);
}

-void dcache_invalidate_by_mva(void const *addr, size_t len)
+void dcache_invalidate_by_mva(const void *addr, size_t len)
{
dcache_op_va(addr, len, OP_DCIVAC);
}
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
index 15d7e38..4883eaf 100644
--- a/src/arch/arm64/armv8/exception.c
+++ b/src/arch/arm64/armv8/exception.c
@@ -8,7 +8,7 @@
#include <console/uart.h>
#include <arch/lib_helpers.h>

-uint8_t exception_stack[2*KiB] __attribute__((aligned(16)));
+uint8_t exception_stack[2 * KiB] __attribute__((aligned(16)));

static const char *exception_names[NUM_EXC_VIDS] = {
[EXC_VID_CUR_SP_EL0_SYNC] = "_sync_sp_el0",
@@ -26,7 +26,7 @@
[EXC_VID_LOW32_SYNC] = "_sync_elx_32",
[EXC_VID_LOW32_IRQ] = "_irq_elx_32",
[EXC_VID_LOW32_FIQ] = "_fiq_elx_32",
- [EXC_VID_LOW32_SERR] = "_serror_elx_32"
+ [EXC_VID_LOW32_SERR] = "_serror_elx_32",
};

static void dump_stack(uintptr_t addr, size_t bytes)
@@ -50,17 +50,15 @@
struct elx_state *elx = &exc_state->elx;
struct regs *regs = &exc_state->regs;

- printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08llx\n",
- elx->elr, raw_read_esr_el3());
- printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08llx\n",
- raw_read_far_el3(), raw_read_spsr_el3());
+ printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08llx\n", elx->elr,
+ raw_read_esr_el3());
+ printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08llx\n", raw_read_far_el3(),
+ raw_read_spsr_el3());
for (i = 0; i < 30; i += 2) {
- printk(BIOS_DEBUG,
- "X%02d = 0x%016llx X%02d = 0x%016llx\n",
- i, regs->x[i], i + 1, regs->x[i + 1]);
+ printk(BIOS_DEBUG, "X%02d = 0x%016llx X%02d = 0x%016llx\n", i,
+ regs->x[i], i + 1, regs->x[i + 1]);
}
- printk(BIOS_DEBUG, "X30 = 0x%016llx SP = 0x%016llx\n",
- regs->x[30], regs->sp);
+ printk(BIOS_DEBUG, "X30 = 0x%016llx SP = 0x%016llx\n", regs->x[30], regs->sp);
}

static struct exception_handler *handlers[NUM_EXC_VIDS];
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 3cedbcf..abf0ab8 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -17,14 +17,10 @@

static void print_tag(int level, uint64_t tag)
{
- printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
- " cacheable | ");
- printk(level, tag & MA_RO ? "read-only | " :
- "read-write | ");
- printk(level, tag & MA_NS ? "non-secure | " :
- " secure | ");
- printk(level, tag & MA_MEM ? "normal\n" :
- "device\n");
+ printk(level, tag & MA_MEM_NC ? "non-cacheable | " : " cacheable | ");
+ printk(level, tag & MA_RO ? "read-only | " : "read-write | ");
+ printk(level, tag & MA_NS ? "non-secure | " : " secure | ");
+ printk(level, tag & MA_MEM ? "normal\n" : "device\n");
}

/* Func : get_block_attr
@@ -59,15 +55,14 @@
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
while (next_free_table[0] != UNUSED_DESC) {
- next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
+ next_free_table += GRANULE_SIZE / sizeof(*next_free_table);
if (_ettb - (u8 *)next_free_table <= 0)
die("Ran out of page table space!");
}

void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
- printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
- " table @%p\n", frame_base, frame_base +
- (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);
+ printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page table @%p\n",
+ frame_base, frame_base + (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);

if (!desc) {
memset(next_free_table, 0, GRANULE_SIZE);
@@ -77,7 +72,7 @@
desc |= PAGE_DESC;

int i = 0;
- for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
+ for (; i < GRANULE_SIZE / sizeof(*next_free_table); i++) {
next_free_table[i] = desc;
desc += xlat_size;
}
@@ -109,9 +104,7 @@
* accordingly. On success, it returns the size of the block/page addressed by
* the final table.
*/
-static uint64_t init_xlat_table(uint64_t base_addr,
- uint64_t size,
- uint64_t tag)
+static uint64_t init_xlat_table(uint64_t base_addr, uint64_t size, uint64_t tag)
{
uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
@@ -125,23 +118,21 @@
table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

/* L1 table lookup */
- if ((size >= L1_XLAT_SIZE) &&
- IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
- /* If block address is aligned and size is greater than
+ if ((size >= L1_XLAT_SIZE) && IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+ /* If block address is aligned and size is greater than
* or equal to size addressed by each L1 entry, we can
* directly store a block desc */
- desc = base_addr | BLOCK_DESC | attr;
- table[l1_index] = desc;
- /* L2 lookup is not required */
- return L1_XLAT_SIZE;
+ desc = base_addr | BLOCK_DESC | attr;
+ table[l1_index] = desc;
+ /* L2 lookup is not required */
+ return L1_XLAT_SIZE;
}

/* L1 entry stores a table descriptor */
table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

/* L2 table lookup */
- if ((size >= L2_XLAT_SIZE) &&
- IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
+ if ((size >= L2_XLAT_SIZE) && IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
/* If block address is aligned and size is greater than
* or equal to size addressed by each L2 entry, we can
* directly store a block desc */
@@ -165,10 +156,8 @@
*/
static void sanity_check(uint64_t addr, uint64_t size)
{
- assert(!(addr & GRANULE_SIZE_MASK) &&
- !(size & GRANULE_SIZE_MASK) &&
- (addr + size < (1UL << BITS_PER_VA)) &&
- size >= GRANULE_SIZE);
+ assert(!(addr & GRANULE_SIZE_MASK) && !(size & GRANULE_SIZE_MASK) &&
+ (addr + size < (1UL << BITS_PER_VA)) && size >= GRANULE_SIZE);
}

/* Func : get_pte
@@ -179,11 +168,9 @@
uint64_t *pte = (uint64_t *)_ttb;

while (1) {
- int index = ((uintptr_t)addr >> shift) &
- ((1UL << BITS_RESOLVED_PER_LVL) - 1);
+ int index = ((uintptr_t)addr >> shift) & ((1UL << BITS_RESOLVED_PER_LVL) - 1);

- if ((pte[index] & DESC_MASK) != TABLE_DESC ||
- shift <= GRANULE_SIZE_SHIFT)
+ if ((pte[index] & DESC_MASK) != TABLE_DESC || shift <= GRANULE_SIZE_SHIFT)
return pte[index];

pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
@@ -197,8 +184,8 @@
static void assert_correct_ttb_mapping(void *addr)
{
uint64_t pte = get_pte(addr);
- assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
- == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
+ assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK) == BLOCK_INDEX_MEM_NORMAL &&
+ !(pte & BLOCK_NS));
}

/* Func : mmu_config_range
@@ -212,15 +199,13 @@
uint64_t base_addr = (uintptr_t)start;
uint64_t temp_size = size;

- printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
- start, start + size);
+ printk(BIOS_INFO, "Mapping address range [%p:%p) as ", start, start + size);
print_tag(BIOS_INFO, tag);

sanity_check(base_addr, temp_size);

while (temp_size)
- temp_size -= init_xlat_table(base_addr + (size - temp_size),
- temp_size, tag);
+ temp_size -= init_xlat_table(base_addr + (size - temp_size), temp_size, tag);

/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
dsb();
@@ -237,7 +222,7 @@
{
/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
uint64_t *table = (uint64_t *)_ttb;
- for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
+ for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE / sizeof(*table))
table[0] = UNUSED_DESC;

/* Initialize the root table (L0) to be completely unmapped. */
@@ -251,9 +236,8 @@
raw_write_mair_el3(MAIR_ATTRIBUTES);

/* Initialize TCR flags */
- raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
- TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
- TCR_TBI_USED);
+ raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC | TCR_SH0_IS |
+ TCR_TG0_4KB | TCR_PS_256TB | TCR_TBI_USED);
}

/* Func : mmu_save_context
diff --git a/src/arch/arm64/bl31.c b/src/arch/arm64/bl31.c
index 06676bc..ece4236 100644
--- a/src/arch/arm64/bl31.c
+++ b/src/arch/arm64/bl31.c
@@ -13,20 +13,16 @@
#include <arm-trusted-firmware/include/export/common/bl_common_exp.h>

static entry_point_info_t bl32_ep_info = {
- .h = {
- .type = PARAM_EP,
- .version = PARAM_VERSION_1,
- .size = sizeof(bl32_ep_info),
- .attr = EP_SECURE,
- },
+ .h = {.type = PARAM_EP,
+ .version = PARAM_VERSION_1,
+ .size = sizeof(bl32_ep_info),
+ .attr = EP_SECURE},
};
static entry_point_info_t bl33_ep_info = {
- .h = {
- .type = PARAM_EP,
- .version = PARAM_VERSION_1,
- .size = sizeof(bl33_ep_info),
- .attr = EP_NON_SECURE,
- },
+ .h = {.type = PARAM_EP,
+ .version = PARAM_VERSION_1,
+ .size = sizeof(bl33_ep_info),
+ .attr = EP_NON_SECURE},
};

static bl_params_node_t bl32_params_node = {
@@ -39,12 +35,10 @@
};

static bl_params_t bl_params = {
- .h = {
- .type = PARAM_BL_PARAMS,
- .version = PARAM_VERSION_2,
- .size = sizeof(bl_params),
- .attr = 0,
- },
+ .h = {.type = PARAM_BL_PARAMS,
+ .version = PARAM_VERSION_2,
+ .size = sizeof(bl_params),
+ .attr = 0},
.head = &bl33_params_node,
};

@@ -61,7 +55,7 @@
__weak void *soc_get_bl31_plat_params(void)
{
static struct bl_aux_param_uint64 cbtable_param = {
- .h = { .type = BL_AUX_PARAM_COREBOOT_TABLE, },
+ .h = {.type = BL_AUX_PARAM_COREBOOT_TABLE},
};
if (!cbtable_param.value) {
cbtable_param.value = (uint64_t)cbmem_find(CBMEM_ID_CBTABLE);
@@ -73,7 +67,7 @@

void run_bl31(u64 payload_entry, u64 payload_arg0, u64 payload_spsr)
{
- struct prog bl31 = PROG_INIT(PROG_BL31, CONFIG_CBFS_PREFIX"/bl31");
+ struct prog bl31 = PROG_INIT(PROG_BL31, CONFIG_CBFS_PREFIX "/bl31");
void (*bl31_entry)(bl_params_t *params, void *plat_params) = NULL;

if (!selfload_check(&bl31, BM_MEM_BL31))
@@ -81,15 +75,13 @@
bl31_entry = prog_entry(&bl31);

if (CONFIG(ARM64_USE_SECURE_OS)) {
- struct prog bl32 = PROG_INIT(PROG_BL32,
- CONFIG_CBFS_PREFIX"/secure_os");
+ struct prog bl32 = PROG_INIT(PROG_BL32, CONFIG_CBFS_PREFIX "/secure_os");

if (cbfs_prog_stage_load(&bl32))
die("BL32 load failed");

bl32_ep_info.pc = (uintptr_t)prog_entry(&bl32);
- bl32_ep_info.spsr = SPSR_EXCEPTION_MASK |
- get_eret_el(EL1, SPSR_USE_L);
+ bl32_ep_info.spsr = SPSR_EXCEPTION_MASK | get_eret_el(EL1, SPSR_USE_L);
bl33_params_node.next_params_info = &bl32_params_node;
}

diff --git a/src/arch/arm64/eabi_compat.c b/src/arch/arm64/eabi_compat.c
index 147cfcc..2a18d29 100644
--- a/src/arch/arm64/eabi_compat.c
+++ b/src/arch/arm64/eabi_compat.c
@@ -10,11 +10,7 @@
}

void __aeabi_unwind_cpp_pr0(void) __attribute__((used));
-void __aeabi_unwind_cpp_pr0(void)
-{
-};
+void __aeabi_unwind_cpp_pr0(void){};

void __aeabi_unwind_cpp_pr1(void) __attribute__((used));
-void __aeabi_unwind_cpp_pr1(void)
-{
-};
+void __aeabi_unwind_cpp_pr1(void){};
diff --git a/src/arch/arm64/fit_payload.c b/src/arch/arm64/fit_payload.c
index f8ae16a..d00ab73 100644
--- a/src/arch/arm64/fit_payload.c
+++ b/src/arch/arm64/fit_payload.c
@@ -9,7 +9,7 @@
#include <fit.h>
#include <endian.h>

-#define MAX_KERNEL_SIZE (64*MiB)
+#define MAX_KERNEL_SIZE (64 * MiB)

struct arm64_kernel_header {
u32 code0;
@@ -21,7 +21,7 @@
u64 res3;
u64 res4;
u32 magic;
-#define KERNEL_HEADER_MAGIC 0x644d5241
+#define KERNEL_HEADER_MAGIC 0x644d5241
u32 res5;
};

@@ -44,12 +44,10 @@
memcpy(scratch.raw, node->data, sizeof(scratch.raw));
break;
case CBFS_COMPRESS_LZMA:
- ulzman(node->data, node->size,
- scratch.raw, sizeof(scratch.raw));
+ ulzman(node->data, node->size, scratch.raw, sizeof(scratch.raw));
break;
case CBFS_COMPRESS_LZ4:
- ulz4fn(node->data, node->size,
- scratch.raw, sizeof(scratch.raw));
+ ulz4fn(node->data, node->size, scratch.raw, sizeof(scratch.raw));
break;
default:
printk(BIOS_ERR, "Unsupported compression algorithm!\n");
@@ -91,7 +89,8 @@
* depending on selected features, and is effectively unbound.
*/

- printk(BIOS_WARNING, "FIT: image_size not set in kernel header.\n"
+ printk(BIOS_WARNING,
+ "FIT: image_size not set in kernel header.\n"
"Leaving additional %u MiB of free space after kernel.\n",
MAX_KERNEL_SIZE >> 20);

@@ -161,23 +160,20 @@
}

bool fit_payload_arch(struct prog *payload, struct fit_config_node *config,
- struct region *kernel,
- struct region *fdt,
- struct region *initrd)
+ struct region *kernel, struct region *fdt, struct region *initrd)
{
bool place_anywhere;
void *arg = NULL;

if (!decompress_kernel_header(config->kernel)) {
printk(BIOS_CRIT, "Payload doesn't look like an ARM64"
- " kernel Image.\n");
+ " kernel Image.\n");
return false;
}

/* Update kernel size from image header, if possible */
kernel->size = get_kernel_size(config->kernel);
- printk(BIOS_DEBUG, "FIT: Using kernel size of 0x%zx bytes\n",
- kernel->size);
+ printk(BIOS_DEBUG, "FIT: Using kernel size of 0x%zx bytes\n", kernel->size);

/**
* The code assumes that bootmem_walk provides a sorted list of memory
diff --git a/src/arch/arm64/romstage.c b/src/arch/arm64/romstage.c
index 0c37711..71c401b 100644
--- a/src/arch/arm64/romstage.c
+++ b/src/arch/arm64/romstage.c
@@ -8,8 +8,12 @@
#include <romstage_common.h>
#include <timestamp.h>

-__weak void platform_romstage_main(void) { /* no-op, for bring-up */ }
-__weak void platform_romstage_postram(void) { /* no-op */ }
+__weak void platform_romstage_main(void)
+{ /* no-op, for bring-up */
+}
+__weak void platform_romstage_postram(void)
+{ /* no-op */
+}

#if CONFIG(SEPARATE_ROMSTAGE)
void main(void)
diff --git a/src/arch/arm64/tables.c b/src/arch/arm64/tables.c
index c66612b..66a7105 100644
--- a/src/arch/arm64/tables.c
+++ b/src/arch/arm64/tables.c
@@ -28,15 +28,13 @@
{
bootmem_add_range((uintptr_t)_ttb, REGION_SIZE(ttb), BM_MEM_RAMSTAGE);

- if (CONFIG(ARM64_USE_ARM_TRUSTED_FIRMWARE) &&
- REGION_SIZE(bl31) > 0)
- bootmem_add_range((uintptr_t)_bl31, REGION_SIZE(bl31),
- BM_MEM_BL31);
+ if (CONFIG(ARM64_USE_ARM_TRUSTED_FIRMWARE) && REGION_SIZE(bl31) > 0)
+ bootmem_add_range((uintptr_t)_bl31, REGION_SIZE(bl31), BM_MEM_BL31);

if (!CONFIG(COMMON_CBFS_SPI_WRAPPER))
return;
- bootmem_add_range((uintptr_t)_postram_cbfs_cache,
- REGION_SIZE(postram_cbfs_cache), BM_MEM_RAMSTAGE);
+ bootmem_add_range((uintptr_t)_postram_cbfs_cache, REGION_SIZE(postram_cbfs_cache),
+ BM_MEM_RAMSTAGE);
}

void lb_arch_add_records(struct lb_header *header)
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
index e5c83bf..b933130 100644
--- a/src/arch/arm64/transition.c
+++ b/src/arch/arm64/transition.c
@@ -52,8 +52,8 @@

/* Initialize SCR with defaults for running without secure monitor
(disable all traps, enable all instructions, run NS at AArch64). */
- raw_write_scr_el3(SCR_FIEN | SCR_API | SCR_APK | SCR_ST | SCR_RW |
- SCR_HCE | SCR_SMD | SCR_RES1 | SCR_NS);
+ raw_write_scr_el3(SCR_FIEN | SCR_API | SCR_APK | SCR_ST | SCR_RW | SCR_HCE | SCR_SMD |
+ SCR_RES1 | SCR_NS);

/* Initialize CPTR to not trap anything to EL3. */
raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |


Gerrit-Project: coreboot
Gerrit-Branch: main
Gerrit-Change-Id: I4543a416d16d2689d7962e9bc69de4ba703495fb
Gerrit-Change-Number: 80009
Gerrit-PatchSet: 1
Gerrit-Owner: Martin L Roth <gaumless@gmail.com>
Gerrit-Reviewer: Julius Werner <jwerner@chromium.org>
Gerrit-Attention: Julius Werner <jwerner@chromium.org>
Gerrit-MessageType: newchange