Set up SLB entries with slbmte instead of writing segment registers with mtsrin, as suggested by Alex.
v2:
 * Don't initialize 64 SLB entries and then invalidate them, as in IBM's
   application note for the 970; use slbia instead, as recommended by Alex.
 * Conditionalize whether the SLB or the segment registers are used.
Cc: Alexander Graf <agraf@suse.de>
Signed-off-by: Andreas Färber <andreas.faerber@web.de>
---
 arch/ppc/qemu/ofmem.c |   25 ++++++++++++++++++++++---
 1 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/ppc/qemu/ofmem.c b/arch/ppc/qemu/ofmem.c
index 72694b3..24f3a25 100644
--- a/arch/ppc/qemu/ofmem.c
+++ b/arch/ppc/qemu/ofmem.c
@@ -387,7 +387,7 @@ void
 setup_mmu( unsigned long ramsize )
 {
     ofmem_t *ofmem;
-    unsigned long sdr1, sr_base;
+    unsigned long sdr1;
     unsigned long hash_base;
     unsigned long hash_mask = 0xfff00000; /* alignment for ppc64 */
     int i;
@@ -399,13 +399,32 @@ setup_mmu( unsigned long ramsize )
     sdr1 = hash_base | ((HASH_SIZE-1) >> 16);
     asm volatile("mtsdr1 %0" :: "r" (sdr1) );
 
+#if defined(__powerpc64__) || defined(CONFIG_PPC_64BITSUPPORT)
+#ifdef CONFIG_PPC_64BITSUPPORT
+    if (is_ppc64()) {
+#endif
+    /* Segment Lookaside Buffer */
+    asm volatile("slbia" ::: "memory");
+    for (i = 0; i < 16; i++) {
+        unsigned long rs = ((0x400 + i) << 12) | (0x10 << 7);
+        unsigned long rb = ((unsigned long)i << 28) | (1 << 27) | i;
+        asm volatile("slbmte %0,%1" :: "r" (rs), "r" (rb) : "memory");
+    }
+    asm volatile("isync" ::: "memory");
+#ifdef CONFIG_PPC_64BITSUPPORT
+    } else
+#endif
+#endif
+#ifndef __powerpc64__
     /* Segment Register */
-
-    sr_base = SEGR_USER | SEGR_BASE ;
+    {
+    unsigned long sr_base = SEGR_USER | SEGR_BASE ;
     for( i=0; i<16; i++ ) {
         int j = i << 28;
         asm volatile("mtsrin %0,%1" :: "r" (sr_base + i), "r" (j) );
     }
+    }
+#endif
 
     ofmem = ofmem_arch_get_private();
     memset(ofmem, 0, sizeof(ofmem_t));
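
Note for reviewers (not part of the patch): a rough sketch of how the RS/RB
operands passed to slbmte above decode, assuming the usual Power ISA SLB entry
layout. The constant and helper names below are invented for illustration and
are not identifiers used in OpenBIOS:

/* Illustrative decode of the slbmte operands built in the loop above. */
#define SLB_VSID_SHIFT  12            /* VSID sits above the flag bits in RS   */
#define SLB_VSID_KS     (1UL << 11)   /* storage key bit; 0x10 << 7 in the patch */
#define SLB_ESID_V      (1UL << 27)   /* entry valid bit; 1 << 27 in the patch   */

static unsigned long slb_rs(unsigned long vsid)
{
    /* rs = ((0x400 + i) << 12) | (0x10 << 7): VSID 0x400 + i, key bit set */
    return (vsid << SLB_VSID_SHIFT) | SLB_VSID_KS;
}

static unsigned long slb_rb(unsigned long esid, unsigned long slot)
{
    /* rb = (i << 28) | (1 << 27) | i: 256 MB segment i, valid, SLB slot i */
    return (esid << 28) | SLB_ESID_V | slot;
}

With 256 MB segments the ESID is just the top nibble of the 32-bit effective
address, so the slbmte loop covers the same 0-4 GB range as the mtsrin loop in
the 32-bit path.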