Set up SLBs with slbmte instead of mtsrin, as suggested by Alexander Graf. Adapt the SLB setup example code from an IBM application note.
Cc: Alexander Graf agraf@suse.de Signed-off-by: Andreas Färber andreas.faerber@web.de --- arch/ppc/qemu/ofmem.c | 35 ++++++++++++++++++++++++++++++++--- 1 files changed, 32 insertions(+), 3 deletions(-)
diff --git a/arch/ppc/qemu/ofmem.c b/arch/ppc/qemu/ofmem.c index e8b0b24..85b9956 100644 --- a/arch/ppc/qemu/ofmem.c +++ b/arch/ppc/qemu/ofmem.c @@ -393,7 +393,7 @@ void setup_mmu( unsigned long ramsize ) { ofmem_t *ofmem; - unsigned long sdr1, sr_base, msr; + unsigned long sdr1, msr; unsigned long hash_base; unsigned long hash_mask = 0xfff00000; /* alignment for ppc64 */ int i; @@ -405,13 +405,42 @@ setup_mmu( unsigned long ramsize ) sdr1 = hash_base | ((HASH_SIZE-1) >> 16); asm volatile("mtsdr1 %0" :: "r" (sdr1) );
+#ifdef __powerpc64__ +#define SLB_SIZE 64 +#else +#define SLB_SIZE 16 +#endif +#if 1//def __powerpc64__ +#if 1 + /* Initialize SLBs */ + for (i = 0; i < SLB_SIZE; i++) { + unsigned long rs = (i << 12) | (0 << 7); + unsigned long rb = ((unsigned long)i << 28) | (0 << 27) | i; + asm volatile("slbmte %0,%1" :: "r" (rs), "r" (rb) : "memory"); + } + /* Invalidate SLBs */ + for (i = 1; i < SLB_SIZE; i++) { + unsigned long rb = ((unsigned long)i << 28) | (0 << 27); + asm volatile("slbie %0" :: "r" (rb) : "memory"); + } +#endif + /* Set SLBs */ + for (i = 0; i < 16; i++) { + unsigned long rs = ((0x400 + i) << 12) | (0x10 << 7); + unsigned long rb = ((unsigned long)i << 28) | (1 << 27) | i; + asm volatile("slbmte %0,%1" :: "r" (rs), "r" (rb) : "memory"); + } + asm volatile("isync" ::: "memory"); +#else /* Segment Register */ - - sr_base = SEGR_USER | SEGR_BASE ; + { + unsigned long sr_base = SEGR_USER | SEGR_BASE ; for( i=0; i<16; i++ ) { int j = i << 28; asm volatile("mtsrin %0,%1" :: "r" (sr_base + i), "r" (j) ); } + } +#endif
ofmem = ofmem_arch_get_private(); memset(ofmem, 0, sizeof(ofmem_t));