[coreboot] Patch set updated for coreboot: ba1feda armv7/exynos/snow: new cache maintenance API

Stefan Reinauer (stefan.reinauer@coreboot.org) gerrit at coreboot.org
Tue Mar 19 19:31:56 CET 2013


Stefan Reinauer (stefan.reinauer at coreboot.org) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/2729

-gerrit

commit ba1fedae50b154b6a3b2da40fec223844b9fb415
Author: David Hendricks <dhendrix at chromium.org>
Date:   Thu Mar 14 15:24:57 2013 -0700

    armv7/exynos/snow: new cache maintenance API
    
    This adds a new API for cache maintenance operations. The idea is
    to be more explicit about the operations being performed, making it
    easier to manage branch predictor, cache, and TLB cleans and
    invalidations.
    
    Also, this adds some operations that were missing but required early
    on, such as branch predictor invalidation. The instruction and sync
    barriers were also wrong, since the imported API assumed we compiled
    with -march=armv5 (which we don't) and lacked wrappers for the
    native ARMv7 ISB/DSB/DMB instructions.
    
    For now this is a start; it gives us something we can easily use in
    libpayload for things like cleaning and invalidating the dcache
    around DMA transfers.
    
    TODO:
    - Set cache policy explicitly before re-enabling. Right now it's left
      at default.
    - Finish deprecating old cache maintenance API.
    - We do an extra icache/dcache flush when going from bootblock to
      romstage.
    
    Change-Id: I7390981190e3213f4e1431f8e56746545c5cc7c9
    Signed-off-by: David Hendricks <dhendrix at chromium.org>
---
 src/arch/armv7/bootblock_simple.c         |  71 +++++++++
 src/arch/armv7/include/arch/cache.h       | 231 ++++++++++++++++++++++++++++++
 src/arch/armv7/include/arch/io.h          |   3 +-
 src/arch/armv7/include/cache.h            |  56 --------
 src/arch/armv7/include/system.h           |   6 +-
 src/arch/armv7/lib/Makefile.inc           |   6 +
 src/arch/armv7/lib/cache-cp15.c           |   3 +-
 src/arch/armv7/lib/cache.c                | 178 +++++++++++++++++++++++
 src/arch/armv7/lib/cache_v7.c             |   2 +-
 src/arch/armv7/stages.c                   |   5 +-
 src/cpu/samsung/exynos5250/Makefile.inc   |   1 -
 src/cpu/samsung/exynos5250/exynos_cache.c |  11 --
 src/mainboard/google/snow/romstage.c      |  26 +---
 13 files changed, 502 insertions(+), 97 deletions(-)
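
A usage sketch, not part of the patch: per the commit message, the main
consumer of dcache_clean_invalidate_by_mva() is DMA code in a payload.
start_transfer() and wait_transfer() below are hypothetical driver
hooks, and the buffer size and alignment are arbitrary.

#include <arch/cache.h>

/* Hypothetical driver hooks -- stand-ins for a real device driver. */
extern void start_transfer(void *buf, unsigned long len);
extern void wait_transfer(void);

static char dma_buf[512] __attribute__((aligned(64)));

static void dma_read_sketch(void)
{
	/* Write back any dirty lines and drop them so the CPU won't
	 * later read stale cached copies of device-written data. */
	dcache_clean_invalidate_by_mva((unsigned long)dma_buf,
				       sizeof(dma_buf));
	dsb();	/* order the maintenance before the device starts */

	start_transfer(dma_buf, sizeof(dma_buf));
	wait_transfer();
	/* dma_buf can now be read directly from memory. */
}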

diff --git a/src/arch/armv7/bootblock_simple.c b/src/arch/armv7/bootblock_simple.c
index 0132b87..7012e17 100644
--- a/src/arch/armv7/bootblock_simple.c
+++ b/src/arch/armv7/bootblock_simple.c
@@ -20,6 +20,7 @@
  */
 
 #include <bootblock_common.h>
+#include <arch/cache.h>
 #include <arch/hlt.h>
 #include <arch/stages.h>
 #include <cbfs.h>
@@ -27,6 +28,58 @@
 
 #include "stages.c"
 
+static void armv7_invalidate_caches(void)
+{
+	uint32_t clidr;
+	int level;
+
+	/* Invalidate branch predictor */
+	bpiall();
+
+	/* Iterate through each cache identified in CLIDR and invalidate it */
+	clidr = read_clidr();
+	for (level = 0; level < 7; level++) {
+		unsigned int ctype = (clidr >> (level * 3)) & 0x7;
+		uint32_t csselr;
+
+		switch(ctype) {
+		case 0x0:
+			/* no cache */
+			break;
+		case 0x1:
+			/* icache only */
+			csselr = (level << 1) | 1;
+			write_csselr(csselr);
+			icache_invalidate_all();
+			break;
+		case 0x2:
+		case 0x4:
+			/* dcache only or unified cache */
+			dcache_invalidate_all();
+			break;
+		case 0x3:
+			/* separate icache and dcache */
+			csselr = (level << 1) | 1;
+			write_csselr(csselr);
+			icache_invalidate_all();
+
+			csselr = level << 1;
+			write_csselr(csselr);
+			dcache_invalidate_all();
+			break;
+		default:
+			/* reserved */
+			break;
+		}
+	}
+
+	/* Invalidate TLB */
+	/* FIXME: ARMv7 Architecture Ref. Manual claims that the distinction
+	 * between instruction and data TLBs is deprecated in ARMv7, but that
+	 * doesn't really seem true for Cortex-A15? */
+	tlb_invalidate_all();
+}
+
 static int boot_cpu(void)
 {
 	/*
@@ -41,6 +94,24 @@ void main(void)
 {
 	const char *stage_name = "fallback/romstage";
 	void *entry;
+	uint32_t sctlr;
+
+	/* Globally disable MMU, caches, and branch prediction (these should
+	 * be disabled by default on reset) */
+	sctlr = read_sctlr();
+	sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_Z | SCTLR_I);
+	write_sctlr(sctlr);
+
+	armv7_invalidate_caches();
+
+	/*
+	 * Re-enable caches and branch prediction. MMU will be set up later.
+	 * Note: If booting from USB, we need to disable branch prediction
+	 * before copying from USB into RAM (FIXME: why?)
+	 */
+	sctlr = read_sctlr();
+	sctlr |= SCTLR_C | SCTLR_Z | SCTLR_I;
+	write_sctlr(sctlr);
 
 	if (boot_cpu()) {
 		bootblock_cpu_init();
diff --git a/src/arch/armv7/include/arch/cache.h b/src/arch/armv7/include/arch/cache.h
new file mode 100644
index 0000000..5125b8c
--- /dev/null
+++ b/src/arch/armv7/include/arch/cache.h
@@ -0,0 +1,231 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef ARMV7_CACHE_H
+#define ARMV7_CACHE_H
+
+/* SCTLR bits */
+#define SCTLR_M		(1 << 0)	/* MMU enable			*/
+#define SCTLR_A		(1 << 1)	/* Alignment check enable	*/
+#define SCTLR_C		(1 << 2)	/* Data/unified cache enable	*/
+/* Bits 4:3 are reserved */
+#define SCTLR_CP15BEN	(1 << 5)	/* CP15 barrier enable		*/
+/* Bit 6 is reserved */
+#define SCTLR_B		(1 << 7)	/* Endianness			*/
+/* Bits 9:8 are reserved */
+#define SCTLR_SW	(1 << 10)	/* SWP and SWPB enable		*/
+#define SCTLR_Z		(1 << 11)	/* Branch prediction enable	*/
+#define SCTLR_I		(1 << 12)	/* Instruction cache enable	*/
+#define SCTLR_V		(1 << 13)	/* Low/high exception vectors 	*/
+#define SCTLR_RR  	(1 << 14)	/* Round Robin select		*/
+/* Bits 16:15 are reserved */
+#define SCTLR_HA	(1 << 17)	/* Hardware Access flag enable	*/
+/* Bit 18 is reserved */
+/* Bits 20:19 are reserved when virtualization is not supported */
+#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN	*/
+#define SCTLR_UWXN	(1 << 20)	/* Unprivileged write permission
+					   implies PL1 XN		*/
+#define SCTLR_FI	(1 << 21)	/* Fast interrupt config enable	*/
+#define SCTLR_U		(1 << 22)	/* Unaligned access behavior	*/
+#define SCTLR_VE	(1 << 24)	/* Interrupt vectors enable	*/
+#define SCTLR_EE	(1 << 25)	/* Exception endianness		*/
+/* Bit 26 is reserved */
+#define SCTLR_NMFI	(1 << 27)	/* Non-maskable FIQ support	*/
+#define SCTLR_TRE	(1 << 28)	/* TEX remap enable		*/
+#define SCTLR_AFE	(1 << 29)	/* Access flag enable		*/
+#define SCTLR_TE	(1 << 30)	/* Thumb exception enable	*/
+/* Bit 31 is reserved */
+
+/*
+ * Sync primitives
+ */
+
+/* data memory barrier */
+static inline void dmb(void)
+{
+	asm volatile ("dmb" : : : "memory");
+}
+
+/* data sync barrier */
+static inline void dsb(void)
+{
+	asm volatile ("dsb" : : : "memory");
+}
+
+/* instruction sync barrier */
+static inline void isb(void)
+{
+	asm volatile ("isb" : : : "memory");
+}
+
+/*
+ * Low-level TLB maintenance operations
+ */
+
+/* invalidate entire data TLB */
+static inline void dtlbiall(void)
+{
+	asm volatile ("mcr p15, 0, %0, c8, c6, 0" : : "r" (0));
+}
+
+/* invalidate entire instruction TLB */
+static inline void itlbiall(void)
+{
+	asm volatile ("mcr p15, 0, %0, c8, c5, 0" : : "r" (0));
+}
+
+/* invalidate entire unified TLB */
+static inline void tlbiall(void)
+{
+	asm volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
+}
+
+/*
+ * Low-level cache maintenance operations
+ */
+
+/* branch predictor invalidate all */
+static inline void bpiall(void)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
+}
+
+/* data cache clean and invalidate by MVA to PoC */
+static inline void dccimvac(unsigned long mva)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" (mva));
+}
+
+/* data cache invalidate by set/way */
+static inline void dccisw(uint32_t val)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
+}
+
+/* data cache invalidate by set/way */
+static inline void dcisw(uint32_t val)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c6, 2" : : "r" (val));
+}
+
+/* data cache clean by MVA to PoC */
+static inline void dccmvac(unsigned long mva)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" (mva));
+}
+
+/* data cache invalidate by MVA to PoC */
+static inline void dcimvac(unsigned long mva)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (mva));
+}
+
+/* instruction cache invalidate all by PoU */
+static inline void iciallu(void)
+{
+	asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
+}
+
+/*
+ * Cache co-processor (CP15) access functions
+ */
+
+/* read cache level ID register (CLIDR) */
+static inline uint32_t read_clidr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r" (val));
+	return val;
+}
+
+/* read cache size ID register (CCSIDR) */
+static inline uint32_t read_ccsidr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
+	return val;
+}
+
+/* read cache size selection register (CSSELR) */
+static inline uint32_t read_csselr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrc p15, 2, %0, c0, c0, 0" : "=r" (val));
+	return val;
+}
+
+/* write to cache size selection register (CSSELR) */
+static inline void write_csselr(uint32_t val)
+{
+	/*
+	 * Bits [3:1] - Cache level + 1 (0b000 = L1, 0b110 = L7, 0b111 is rsvd)
+	 * Bit 0 - 0 = data or unified cache, 1 = instruction cache
+	 */
+	asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (val));
+	isb();	/* ISB to sync the change to CCSIDR */
+}
+
+/* read system control register (SCTLR) */
+static inline unsigned int read_sctlr(void)
+{
+	unsigned int val;
+	asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (val) : : "cc");
+	return val;
+}
+
+/* write system control register (SCTLR) */
+static inline void write_sctlr(unsigned int val)
+{
+	asm volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val) : "cc");
+	isb();
+}
+
+/*
+ * Cache maintenance API
+ */
+
+/* invalidate all TLBs */
+void tlb_invalidate_all(void);
+
+/* clean and invalidate entire dcache on current level (given by CSSELR) */
+void dcache_clean_invalidate_all(void);
+
+/* invalidate entire dcache on current level (given by CSSELR) */
+void dcache_invalidate_all(void);
+
+/* clean and invalidate dcache by modified virtual address (MVA) to PoC */
+void dcache_clean_invalidate_by_mva(unsigned long addr, unsigned long len);
+
+/* invalidate entire icache on current level (given by CSSELR) */
+void icache_invalidate_all(void);
+
+/* MMU setup by modified virtual address (MVA) */
+void mmu_setup_by_mva(unsigned long start, unsigned long size);
+
+#endif /* ARMV7_CACHE_H */
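
An illustration of how the accessors above pair up (a sketch, not part
of the patch): select the L1 data cache in CSSELR, then decode its
geometry from CCSIDR using the same field encodings this patch uses in
cache.c (NumSets in bits [27:13], Associativity in [12:3], LineSize in
[2:0]).

#include <types.h>
#include <arch/cache.h>

static void l1_dcache_geometry(unsigned int *sets, unsigned int *ways,
			       unsigned int *line_bytes)
{
	uint32_t ccsidr;

	write_csselr((0 << 1) | 0);	/* level 1, data/unified cache */
	ccsidr = read_ccsidr();

	*sets = ((ccsidr >> 13) & 0x7fff) + 1;	/* field holds N - 1 */
	*ways = ((ccsidr >> 3) & 0x3ff) + 1;	/* field holds N - 1 */
	/* LineSize encodes log2(words per line) - 2, 4 bytes per word */
	*line_bytes = (1 << ((ccsidr & 0x7) + 2)) * 4;
}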
diff --git a/src/arch/armv7/include/arch/io.h b/src/arch/armv7/include/arch/io.h
index 3bbd529..623c305 100644
--- a/src/arch/armv7/include/arch/io.h
+++ b/src/arch/armv7/include/arch/io.h
@@ -21,6 +21,7 @@
 #define __ASM_ARM_IO_H
 
 #include <types.h>
+#include <arch/cache.h>		/* for dmb() */
 #include <arch/byteorder.h>
 
 static inline void sync(void)
@@ -96,7 +97,7 @@ extern inline void __raw_readsl(unsigned int addr, void *data, int longlen)
  * TODO: The kernel offers some more advanced versions of barriers, it might
  * have some advantages to use them instead of the simple one here.
  */
-#define dmb()		__asm__ __volatile__ ("" : : : "memory")
+//#define dmb()		__asm__ __volatile__ ("" : : : "memory")
 #define __iormb()	dmb()
 #define __iowmb()	dmb()
 
diff --git a/src/arch/armv7/include/cache.h b/src/arch/armv7/include/cache.h
deleted file mode 100644
index cf8fb5a..0000000
--- a/src/arch/armv7/include/cache.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * (C) Copyright 2009
- * Marvell Semiconductor <www.marvell.com>
- * Written-by: Prafulla Wadaskar <prafulla at marvell.com>
- *
- * See file CREDITS for list of people who contributed to this
- * project.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA
- */
-
-#ifndef _ASM_CACHE_H
-#define _ASM_CACHE_H
-
-/*
- * Invalidate L2 Cache using co-proc instruction
- */
-static inline void invalidate_l2_cache(void)
-{
-	unsigned int val=0;
-
-	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
-		: : "r" (val) : "cc");
-	isb();
-}
-
-void l2_cache_enable(void);
-void l2_cache_disable(void);
-
-/*
- * The current upper bound for ARM L1 data cache line sizes is 64 bytes.  We
- * use that value for aligning DMA buffers unless the board config has specified
- * an alternate cache line size.
- */
-#ifdef CONFIG_SYS_CACHELINE_SIZE
-#define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE
-#else
-#define ARCH_DMA_MINALIGN	64
-#endif
-
-inline void dram_bank_mmu_setup(unsigned long start, unsigned long size);
-
-#endif /* _ASM_CACHE_H */
diff --git a/src/arch/armv7/include/system.h b/src/arch/armv7/include/system.h
index 053df8d..f3e9b6b 100644
--- a/src/arch/armv7/include/system.h
+++ b/src/arch/armv7/include/system.h
@@ -43,13 +43,15 @@
  */
 #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
 
-#define isb() __asm__ __volatile__ ("" : : : "memory")
+/* FIXME: conflicts with new implementation in cache.c */
+//#define isb() __asm__ __volatile__ ("" : : : "memory")
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
 #define arch_align_stack(x) (x)
 
 #ifndef __ASSEMBLER__
+#include <arch/cache.h>	/* for isb() */
 static inline unsigned int get_cr(void)
 {
 	unsigned int val;
@@ -97,6 +99,8 @@ void mmu_page_table_flush(unsigned long start, unsigned long stop);
 
 void mmu_setup(unsigned long start, unsigned long size);
 
+void v7_inval_tlb(void);
+
 void arm_init_before_mmu(void);
 
  /*
diff --git a/src/arch/armv7/lib/Makefile.inc b/src/arch/armv7/lib/Makefile.inc
index 508f776..c248b9e 100644
--- a/src/arch/armv7/lib/Makefile.inc
+++ b/src/arch/armv7/lib/Makefile.inc
@@ -1,8 +1,12 @@
+#FIXME: cache_v7 and cache-cp15 will go away eventually
+
 bootblock-y += syslib.c
 bootblock-$(CONFIG_EARLY_CONSOLE) += early_console.c
+bootblock-y += cache.c
 bootblock-y += cache_v7.c
 bootblock-y += cache-cp15.c
 
+romstage-y += cache.c
 romstage-y += cache_v7.c
 romstage-y += cache-cp15.c
 romstage-y += div0.c
@@ -14,7 +18,9 @@ ramstage-y += div0.c
 #ramstage-y += memcpy.S
 #ramstage-y += memset.S
 ramstage-y += syslib.c
+ramstage-y += cache.c
 ramstage-y += cache_v7.c
+ramstage-y += cache-cp15.c
 
 #FIXME(dhendrix): should this be a config option?
 romstage-y += eabi_compat.c
diff --git a/src/arch/armv7/lib/cache-cp15.c b/src/arch/armv7/lib/cache-cp15.c
index e08ea57..32f3c79 100644
--- a/src/arch/armv7/lib/cache-cp15.c
+++ b/src/arch/armv7/lib/cache-cp15.c
@@ -123,8 +123,7 @@ inline void mmu_setup(unsigned long start, unsigned long size_mb)
 	int i;
 	u32 reg;
 
-	arm_init_before_mmu();
-
+//	arm_init_before_mmu();
 	/* Set up an identity-mapping for all 4GB, rw for everyone */
 	for (i = 0; i < 4096; i++)
 		set_section_dcache(i, DCACHE_OFF);
diff --git a/src/arch/armv7/lib/cache.c b/src/arch/armv7/lib/cache.c
new file mode 100644
index 0000000..62ae755
--- /dev/null
+++ b/src/arch/armv7/lib/cache.c
@@ -0,0 +1,178 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.c: Low-level cache operations for ARMv7
+ */
+
+#include <types.h>
+
+#include <arch/cache.h>
+
+#define bitmask(high, low) ((1UL << (high)) + \
+			((1UL << (high)) - 1) - ((1UL << (low)) - 1))
+
+/* Basic log2() implementation. Note: log2(0) is 0 for our purposes. */
+/* FIXME: src/include/lib.h is difficult to work with due to romcc */
+static unsigned long log2(unsigned long u)
+{
+	int i = 0;
+
+	while (u >>= 1)
+		i++;
+
+	return i;
+}
+
+void tlb_invalidate_all(void)
+{
+	/*
+	 * FIXME: ARMv7 Architecture Ref. Manual claims that the distinction
+	 * between instruction and data TLBs is deprecated in ARMv7, but that
+	 * doesn't really seem true for Cortex-A15?
+	 */
+	tlbiall();
+	dtlbiall();
+	itlbiall();
+	isb();
+	dsb();
+}
+
+void icache_invalidate_all(void)
+{
+	/* icache can be entirely invalidated with one operation.
+	 * Note: If branch predictors are architecturally-visible, ICIALLU
+	 * also performs a BPIALL operation (B2-1283 in arch manual)
+	 */
+	iciallu();
+	isb();
+}
+
+enum dcache_op {
+	OP_DCCISW,
+	OP_DCISW
+};
+
+/* do a dcache operation on entire cache by set/way */
+static void dcache_op_set_way(enum dcache_op op)
+{
+	uint32_t ccsidr;
+	unsigned int associativity, num_sets, linesize_bytes;
+	unsigned int set, way;
+	unsigned int level;
+
+	level = (read_csselr() >> 1) & 0x7;
+
+	/*
+	 * dcache must be maintained by set/way for portability, since the
+	 * virtual memory mapping is system-defined. The number of sets and
+	 * the associativity are given by CCSIDR; op selects between a
+	 * clean + invalidate (DCCISW) and an invalidate (DCISW).
+	 */
+	ccsidr = read_ccsidr();
+
+	/* FIXME: rounding up required here? */
+	num_sets = ((ccsidr & bitmask(27, 13)) >> 13) + 1;
+	associativity = ((ccsidr & bitmask(12, 3)) >> 3) + 1;
+	/* FIXME: do we need to use CTR.DminLine here? */
+	linesize_bytes = (1 << ((ccsidr & 0x7) + 2)) * 4;
+
+	/*
+	 * Set/way operations require an interesting bit packing. See section
+	 * B4-35 in the ARMv7 Architecture Reference Manual:
+	 *
+	 * A: Log2(associativity)
+	 * B: L+S
+	 * L: Log2(linesize)
+	 * S: Log2(num_sets)
+	 *
+	 * The bits are packed as follows:
+	 *  31  31-A        B B-1    L L-1   4 3   1 0
+	 * |---|-------------|--------|-------|-----|-|
+	 * |Way|    zeros    |   Set  | zeros |level|0|
+	 * |---|-------------|--------|-------|-----|-|
+	 */
+	for (way = 0; way < associativity; way++) {
+		for (set = 0; set < num_sets; set++) {
+			uint32_t val = 0;
+			val |= way << (32 - log2(associativity));
+			val |= set << log2(linesize_bytes);
+			val |= level << 1;
+			switch(op) {
+			case OP_DCCISW:
+				dccisw(val);
+				break;
+			case OP_DCISW:
+				dcisw(val);
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	dsb();
+}
+
+void dcache_clean_invalidate_all(void)
+{
+	dcache_op_set_way(OP_DCCISW);
+}
+
+void dcache_invalidate_all(void)
+{
+	dcache_op_set_way(OP_DCISW);
+}
+
+static unsigned int line_bytes(void)
+{
+	uint32_t ccsidr;
+	unsigned int size;
+
+	ccsidr = read_ccsidr();
+	/* [2:0] - Indicates (Log2(number of words in cache line)) - 2 */
+	size = 1 << ((ccsidr & 0x7) + 2);	/* words per line */
+	size *= sizeof(unsigned int);		/* bytes per line */
+
+	return size;
+}
+
+void dcache_clean_invalidate_by_mva(unsigned long addr, unsigned long len)
+{
+	unsigned long line, i;
+
+	line = line_bytes();
+	for (i = addr & ~(line - 1); i < addr + len; i += line)
+		dccimvac(i);
+}
+
+/* FIXME: wrapper around imported mmu_setup() for now */
+extern void mmu_setup(unsigned long start, unsigned long size);
+void mmu_setup_by_mva(unsigned long start, unsigned long size)
+{
+	mmu_setup(start, size);
+}
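
A worked instance of the set/way packing in dcache_op_set_way() (a
sketch, not part of the patch): a hypothetical 32 KB, 4-way cache with
64-byte lines has 32768 / (4 * 64) = 128 sets, so A = log2(4) = 2,
L = log2(64) = 6, S = log2(128) = 7, and B = L + S = 13.

#include <types.h>
#include <arch/cache.h>

/* Invalidate a single line -- way 3, set 5 -- of that hypothetical L1
 * cache. The loop in dcache_op_set_way() does this for every way/set. */
static void invalidate_one_line_sketch(void)
{
	uint32_t val = 0;

	val |= 3U << (32 - 2);	/* way goes in bits [31:32-A]		*/
	val |= 5 << 6;		/* set goes in bits [B-1:L] = [12:6]	*/
	val |= 0 << 1;		/* level field: L1 encodes as 0		*/
	/* val == 0xc0000140 */
	dcisw(val);
	dsb();
}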
diff --git a/src/arch/armv7/lib/cache_v7.c b/src/arch/armv7/lib/cache_v7.c
index 31072c7..1764351 100644
--- a/src/arch/armv7/lib/cache_v7.c
+++ b/src/arch/armv7/lib/cache_v7.c
@@ -226,7 +226,7 @@ static void v7_dcache_maint_range(u32 start, u32 stop, u32 range_op)
 }
 
 /* Invalidate TLB */
-static void v7_inval_tlb(void)
+void v7_inval_tlb(void)
 {
 	/* Invalidate entire unified TLB */
 	asm volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
diff --git a/src/arch/armv7/stages.c b/src/arch/armv7/stages.c
index c37c1dd..038ed7c 100644
--- a/src/arch/armv7/stages.c
+++ b/src/arch/armv7/stages.c
@@ -33,6 +33,7 @@
 
 #include <arch/stages.h>
 #include <arch/armv7/include/common.h>
+#include <arch/cache.h>
 
 void stage_entry(void)
 {
@@ -50,10 +51,10 @@ void stage_exit(void *addr)
 	/* make sure any code we installed is written to memory. Not all ARM have
 	 * unified caches.
 	 */
-	flush_dcache_all();
+	dcache_clean_invalidate_all();
 	/* Because most stages copy code to memory, it's a safe and hygienic thing
 	 * to flush the icache here.
 	 */
-	invalidate_icache_all();
+	icache_invalidate_all();
 	doit();
 }
diff --git a/src/cpu/samsung/exynos5250/Makefile.inc b/src/cpu/samsung/exynos5250/Makefile.inc
index 2774b12..961b719 100644
--- a/src/cpu/samsung/exynos5250/Makefile.inc
+++ b/src/cpu/samsung/exynos5250/Makefile.inc
@@ -30,7 +30,6 @@ ramstage-y += power.c
 ramstage-y += soc.c
 ramstage-$(CONFIG_CONSOLE_SERIAL_UART) += uart.c
 ramstage-y += cpu.c
-ramstage-y += exynos_cache.c
 
 #ramstage-$(CONFIG_SATA_AHCI) += sata.c
 
diff --git a/src/cpu/samsung/exynos5250/exynos_cache.c b/src/cpu/samsung/exynos5250/exynos_cache.c
index 7f4effe..2cb918d 100644
--- a/src/cpu/samsung/exynos5250/exynos_cache.c
+++ b/src/cpu/samsung/exynos5250/exynos_cache.c
@@ -33,17 +33,6 @@ enum l2_cache_params {
 	CACHE_DATA_RAM_LATENCY = (2<<0)
 };
 
-
-/* FIXME(dhendrix): maybe move this to a romstage-specific file? */
-#ifdef __PRE_RAM__
-void enable_caches(void)
-{
-	/* Enable D-cache. I-cache is already enabled in start.S */
-	/* can't use it anyway -- it has dependencies we have to fix. */
-	//dcache_enable();
-}
-#endif
-
 /*
  * Set L2 cache parameters
  */
diff --git a/src/mainboard/google/snow/romstage.c b/src/mainboard/google/snow/romstage.c
index ea2feec..bfb4156 100644
--- a/src/mainboard/google/snow/romstage.c
+++ b/src/mainboard/google/snow/romstage.c
@@ -18,12 +18,12 @@
  */
 
 #include <types.h>
-#include <system.h>
 
-#include <cache.h>
+#include <armv7.h>
 #include <cbfs.h>
 #include <common.h>
 
+#include <arch/cache.h>
 #include <arch/gpio.h>
 #include <cpu/samsung/exynos5250/clk.h>
 #include <cpu/samsung/exynos5250/dmc.h>
@@ -52,20 +52,6 @@ static int board_wakeup_permitted(void)
 }
 #endif
 
-/*
- * Set/clear program flow prediction and return the previous state.
- */
-static int config_branch_prediction(int set_cr_z)
-{
-	unsigned int cr;
-
-	/* System Control Register: 11th bit Z Branch prediction enable */
-	cr = get_cr();
-	set_cr(set_cr_z ? cr | CR_Z : cr & ~CR_Z);
-
-	return cr & CR_Z;
-}
-
 static void initialize_s5p_mshc(void)
 {
 	/* MMC0: Fixed, 8 bit mode, connected with GPIO. */
@@ -95,10 +81,6 @@ void main(void)
 	int ret;
 	void *entry;
 
-	/* FIXME: if we boot from USB, we need to disable branch prediction
-	 * before copying from USB into RAM */
-	config_branch_prediction(1);
-
 	clock_set_rate(PERIPH_ID_SPI1, 50000000); /* set spi clock to 50Mhz */
 
 	/* Clock must be initialized before console_init, otherwise you may need
@@ -108,7 +90,6 @@ void main(void)
 	system_clock_init(mem, arm_ratios);
 
 	console_init();
-
 	/*
 	 * FIXME: Do necessary I2C init so low-level PMIC code doesn't need to.
 	 * Also, we should only call power_init() on cold boot.
@@ -132,7 +113,8 @@ void main(void)
 		while(1);
 	}
 
-	mmu_setup(CONFIG_SYS_SDRAM_BASE, CONFIG_DRAM_SIZE_MB);
+	/* Set up MMU and caches */
+	mmu_setup_by_mva(CONFIG_SYS_SDRAM_BASE, CONFIG_DRAM_SIZE_MB);
 
 	initialize_s5p_mshc();
 


