Clean up use of SDR1 fields.
IBM's "The Programming Environments for 32-Bit Microprocessors": The HTABMASK field in SDR1 contains a mask value that determines how many bits from the hash are used in the page table index. This mask must be of the form 0b00...011...1; that is, a string of 0 bits followed by a string of 1 bits. The 1 bits determine how many additional bits (at least 10) from the hash are used in the index; HTABORG must have this same number of low-order bits equal to 0.
IBM's "The Programming Environments Manual for 64-bit Microprocessors" 3.0: The HTABSIZE field in SDR1 contains an integer value that determines how many bits from the hash are used in the page table index. This number must not exceed 28. HTABSIZE is used to generate a mask of the form 0b00...011...1; that is, a string of 0 bits followed by a string of 1-bits. The 1-bits determine how many additional bits (beyond the minimum of 11) from the hash are used in the index. The HTABORG must have this same number of low-order bits equal to 0.
Adjust alignment mask and hash size for ppc64.
Note that the page table does not yet scale with the memory size, and the secondary hash is not yet used.
v3: * Fix shifts. Thanks to Segher and Alex. * Fix HTABSIZE for 32-bit ppc64: ((2 << 15) - 1) >> 16 happened to be 0, so that it worked as expected. Avoid underflow, spotted by Alex. Make this fix available for legacy 64-bit support as well. * Drop get_hash_size(). It was only used in hash_page_32() and caused unnecessary conversions between HTABMASK and size. * Introduce {mfsdr1,mtsdr1} as suggested by Alex. Note that this changes the volatility for hash_page_64(). * Conditionalize 64-bit support, drop 32-bit support for ppc64. * Adjust hash size for ppc64.
v2: * Use HTABSIZE for ppc64, fix HTABMASK usage for ppc. Error spotted by Segher.
Cc: Alexander Graf agraf@suse.de Cc: Segher Boessenkool segher@kernel.crashing.org Signed-off-by: Andreas Färber andreas.faerber@web.de --- arch/ppc/qemu/ofmem.c | 65 +++++++++++++++++++++++++++-------------- include/arch/ppc/processor.h | 12 ++++++++ 2 files changed, 55 insertions(+), 22 deletions(-)
diff --git a/arch/ppc/qemu/ofmem.c b/arch/ppc/qemu/ofmem.c index 72694b3..40a23ff 100644 --- a/arch/ppc/qemu/ofmem.c +++ b/arch/ppc/qemu/ofmem.c @@ -48,7 +48,12 @@ extern void setup_mmu( unsigned long code_base ); #define OF_CODE_START 0xfff00000UL #define IO_BASE 0x80000000
-#define HASH_SIZE (2 << 15) +#ifdef __powerpc64__ +#define HASH_BITS 18 +#else +#define HASH_BITS 15 +#endif +#define HASH_SIZE (2 << HASH_BITS) #define OFMEM_SIZE (2 * 1024 * 1024)
#define SEGR_USER BIT(2) @@ -57,21 +62,11 @@ extern void setup_mmu( unsigned long code_base ); static inline unsigned long get_hash_base( void ) { - unsigned long sdr1; - - asm volatile("mfsdr1 %0" : "=r" (sdr1) ); - - return (sdr1 & 0xffff0000); -} - -static inline unsigned long -get_hash_size( void ) -{ - unsigned long sdr1; - - asm volatile("mfsdr1 %0" : "=r" (sdr1) ); - - return ((sdr1 << 16) | 0x0000ffff) + 1; +#ifdef __powerpc64__ + return (mfsdr1() & 0x3FFFFFFFFFFC0000UL); +#else + return (mfsdr1() & 0xffff0000UL); +#endif }
static inline unsigned long @@ -228,6 +223,11 @@ ea_to_phys( ucell ea, ucell *mode ) return phys; }
+#if defined(__powerpc64__) || defined(CONFIG_PPC_64BITSUPPORT) +#ifndef CONFIG_PPC_64BITSUPPORT +#define hash_page_64 hash_page +#endif + static void hash_page_64( ucell ea, ucell phys, ucell mode ) { @@ -242,7 +242,7 @@ hash_page_64( ucell ea, ucell phys, ucell mode ) vsid = (ea >> 28) + SEGR_BASE; vsid_sh = 7; vsid_mask = 0x00003FFFFFFFFF80ULL; - asm ( "mfsdr1 %0" : "=r" (sdr) ); + sdr = mfsdr1(); sdr_sh = 18; sdr_mask = 0x3FF80; page_mask = 0x0FFFFFFF; // XXX correct? @@ -294,6 +294,12 @@ hash_page_64( ucell ea, ucell phys, ucell mode ) asm volatile( "tlbie %0" :: "r"(ea) ); }
+#endif /* ppc64 */ +#ifndef __powerpc64__ +#ifndef CONFIG_PPC_64BITSUPPORT +#define hash_page_32 hash_page +#endif + static void hash_page_32( ucell ea, ucell phys, ucell mode ) { @@ -307,7 +313,7 @@ hash_page_32( ucell ea, ucell phys, ucell mode )
hash1 = vsid; hash1 ^= (ea >> 12) & 0xffff; - hash1 &= (get_hash_size() - 1) >> 6; + hash1 &= (((mfsdr1() & 0x1ff) << 16) | 0xffff) >> 6;
pp = (mPTE_t*)(get_hash_base() + (hash1 << 6)); upte = (unsigned long*)pp; @@ -334,6 +340,8 @@ hash_page_32( ucell ea, ucell phys, ucell mode ) asm volatile( "tlbie %0" :: "r"(ea) ); }
+#ifdef CONFIG_PPC_64BITSUPPORT + static int is_ppc64(void) { unsigned int pvr; @@ -350,6 +358,9 @@ static void hash_page( unsigned long ea, unsigned long phys, ucell mode ) hash_page_32(ea, phys, mode); }
+#endif /* CONFIG_PPC_64BITSUPPORT */ +#endif /* ppc */ + void dsi_exception( void ) { @@ -387,17 +398,27 @@ void setup_mmu( unsigned long ramsize ) { ofmem_t *ofmem; - unsigned long sdr1, sr_base; + unsigned long sr_base; unsigned long hash_base; - unsigned long hash_mask = 0xfff00000; /* alignment for ppc64 */ + unsigned long hash_mask = ~0x000fffffUL; /* alignment for ppc64 */ int i;
/* SDR1: Storage Description Register 1 */
hash_base = (ramsize - 0x00100000 - HASH_SIZE) & hash_mask; memset((void *)hash_base, 0, HASH_SIZE); - sdr1 = hash_base | ((HASH_SIZE-1) >> 16); - asm volatile("mtsdr1 %0" :: "r" (sdr1) ); +#if defined(__powerpc64__) || defined(CONFIG_PPC_64BITSUPPORT) +#ifdef CONFIG_PPC_64BITSUPPORT + if (is_ppc64()) +#endif + mtsdr1(hash_base | MAX(HASH_BITS - 18, 0)); +#ifdef CONFIG_PPC_64BITSUPPORT + else +#endif +#endif +#ifndef __powerpc64__ + mtsdr1(hash_base | ((HASH_SIZE - 1) >> 16)); +#endif
/* Segment Register */
diff --git a/include/arch/ppc/processor.h b/include/arch/ppc/processor.h index c7d5be6..329c5cd 100644 --- a/include/arch/ppc/processor.h +++ b/include/arch/ppc/processor.h @@ -425,6 +425,18 @@ static inline void mtmsr(unsigned long msr) #endif }
+static inline unsigned long mfsdr1(void) +{ + unsigned long sdr1; + asm volatile("mfsdr1 %0" : "=r" (sdr1)); + return sdr1; +} + +static inline void mtsdr1(unsigned long sdr1) +{ + asm volatile("mtsdr1 %0" :: "r" (sdr1)); +} + #endif /* !__ASSEMBLER__ */
#endif /* _H_PROCESSOR */