Clean up use of SDR1 fields.
IBM's "The Programming Environments for 32-Bit Microprocessors": The HTABMASK field in SDR1 contains a mask value that determines how many bits from the hash are used in the page table index. This mask must be of the form 0b00...011...1; that is, a string of 0 bits followed by a string of 1bits. The 1 bits determine how many additional bits (at least 10) from the hash are used in the index; HTABORG must have this same number of low-order bits equal to 0.
IBM's "The Programming Environments Manual for 64-bit Microprocessors" 3.0: The HTABSIZE field in SDR1 contains an integer value that determines how many bits from the hash are used in the page table index. This number must not exceed 28. HTABSIZE is used to generate a mask of the form 0b00...011...1; that is, a string of 0 bits followed by a string of 1-bits. The 1-bits determine how many additional bits (beyond the minimum of 11) from the hash are used in the index. The HTABORG must have this same number of low-order bits equal to 0.
Adjust alignment mask and hash size for ppc64.
Note that the page table does not yet scale with the memory size, and the secondary hash is not yet used.
v5:
* Move HTABORG mask to header.
* Keep is_ppc64() for now, as suggested by Alex. Merge CONFIG_PPC_64BITSUPPORT handling here so that it behaves as intended, not recognizing a ppc64 CPU in pure 32-bit mode.
* Stub out hash_page_32() for ppc64 to avoid having to care about it there. hash_page_64() is less critical for a pure ppc build since the code needs to be 32-bit compatible anyway.
v4:
* Don't redefine hash_page_{32,64} but add TODO comments for renaming.
* Keep and simplify hash_page().
v3:
* Fix shifts. Thanks to Segher and Alex.
* Fix HTABSIZE for 32-bit ppc64: ((2 << 15) - 1) >> 16 happened to be 0, so it worked as expected. Avoid the underflow, spotted by Alex (see the worked values after this changelog). Make this fix available for legacy 64-bit support as well.
* Drop get_hash_size(). It was only used in hash_page_32() and caused unnecessary conversions between HTABMASK and size.
* Introduce {mfsdr1,mtsdr1} as suggested by Alex. Note that this changes the volatility for hash_page_64().
* Conditionalize 64-bit support, drop 32-bit support for ppc64.
* Adjust hash size for ppc64.
v2:
* Use HTABSIZE for ppc64, fix HTABMASK usage for ppc. Error spotted by Segher.
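To make the v3 values above concrete (worked numbers only, not part of the patch):

    ((2 << 15) - 1) >> 16    /* = 0xffff >> 16 = 0: the old expression gave HTABSIZE 0 only by accident */
    MAX(HASH_BITS - 18, 0)   /* = MAX(15 - 18, 0) = 0 for the legacy 32-bit ppc64 case,                 */
                             /*   clamping the negative intermediate value instead of relying on it     */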
Cc: Alexander Graf <agraf@suse.de>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Signed-off-by: Andreas Färber <andreas.faerber@web.de>
---
 arch/ppc/qemu/ofmem.c        | 45 +++++++++++++++++++++--------------------
 include/arch/ppc/processor.h | 18 ++++++++++++++++
 2 files changed, 41 insertions(+), 22 deletions(-)
diff --git a/arch/ppc/qemu/ofmem.c b/arch/ppc/qemu/ofmem.c
index e871623..a844ea7 100644
--- a/arch/ppc/qemu/ofmem.c
+++ b/arch/ppc/qemu/ofmem.c
@@ -48,7 +48,12 @@ extern void setup_mmu( unsigned long code_base );
 #define OF_CODE_START 0xfff00000UL
 #define IO_BASE 0x80000000
 
-#define HASH_SIZE (2 << 15)
+#ifdef __powerpc64__
+#define HASH_BITS 18
+#else
+#define HASH_BITS 15
+#endif
+#define HASH_SIZE (2 << HASH_BITS)
 #define OFMEM_SIZE (2 * 1024 * 1024)
 
 #define SEGR_USER BIT(2)
@@ -57,21 +62,7 @@ extern void setup_mmu( unsigned long code_base );
 static inline unsigned long
 get_hash_base( void )
 {
-    unsigned long sdr1;
-
-    asm volatile("mfsdr1 %0" : "=r" (sdr1) );
-
-    return (sdr1 & 0xffff0000);
-}
-
-static inline unsigned long
-get_hash_size( void )
-{
-    unsigned long sdr1;
-
-    asm volatile("mfsdr1 %0" : "=r" (sdr1) );
-
-    return ((sdr1 << 16) | 0x0000ffff) + 1;
+    return (mfsdr1() & SDR1_HTABORG_MASK);
 }
 
 static inline unsigned long
@@ -242,7 +233,7 @@ hash_page_64( ucell ea, ucell phys, ucell mode )
     vsid = (ea >> 28) + SEGR_BASE;
     vsid_sh = 7;
     vsid_mask = 0x00003FFFFFFFFF80ULL;
-    asm ( "mfsdr1 %0" : "=r" (sdr) );
+    sdr = mfsdr1();
     sdr_sh = 18;
     sdr_mask = 0x3FF80;
     page_mask = 0x0FFFFFFF; // XXX correct?
@@ -297,6 +288,7 @@ hash_page_64( ucell ea, ucell phys, ucell mode )
 static void
 hash_page_32( ucell ea, ucell phys, ucell mode )
 {
+#ifndef __powerpc64__
     static int next_grab_slot=0;
     unsigned long *upte, cmp, hash1;
     int i, vsid, found;
@@ -307,7 +299,7 @@ hash_page_32( ucell ea, ucell phys, ucell mode )
 
     hash1 = vsid;
     hash1 ^= (ea >> 12) & 0xffff;
-    hash1 &= (get_hash_size() - 1) >> 6;
+    hash1 &= (((mfsdr1() & 0x1ff) << 16) | 0xffff) >> 6;
 
     pp = (mPTE_t*)(get_hash_base() + (hash1 << 6));
     upte = (unsigned long*)pp;
@@ -332,14 +324,22 @@ hash_page_32( ucell ea, ucell phys, ucell mode )
     upte[i*2+1] = (phys & ~0xfff) | mode;
 
     asm volatile( "tlbie %0" :: "r"(ea) );
+#endif
 }
 
 static int is_ppc64(void)
 {
+#ifdef __powerpc64__
+    return 1;
+#elif defined(CONFIG_PPC_64BITSUPPORT)
     unsigned int pvr = mfpvr();
     return ((pvr >= 0x330000) && (pvr < 0x70330000));
+#else
+    return 0;
+#endif
 }
 
+/* XXX Remove these ugly constructs when legacy 64-bit support is dropped. */
 static void hash_page( unsigned long ea, unsigned long phys, ucell mode )
 {
     if ( is_ppc64() )
@@ -385,20 +385,21 @@ void setup_mmu( unsigned long ramsize )
 {
     ofmem_t *ofmem;
-    unsigned long sdr1;
 #ifndef __powerpc64__
     unsigned long sr_base;
 #endif
     unsigned long hash_base;
-    unsigned long hash_mask = 0xfff00000; /* alignment for ppc64 */
+    unsigned long hash_mask = ~0x000fffffUL; /* alignment for ppc64 */
     int i;
 
     /* SDR1: Storage Description Register 1 */
 
     hash_base = (ramsize - 0x00100000 - HASH_SIZE) & hash_mask;
     memset((void *)hash_base, 0, HASH_SIZE);
-    sdr1 = hash_base | ((HASH_SIZE-1) >> 16);
-    asm volatile("mtsdr1 %0" :: "r" (sdr1) );
+    if (is_ppc64())
+        mtsdr1(hash_base | MAX(HASH_BITS - 18, 0));
+    else
+        mtsdr1(hash_base | ((HASH_SIZE - 1) >> 16));
 
 #ifdef __powerpc64__
diff --git a/include/arch/ppc/processor.h b/include/arch/ppc/processor.h
index c2f1284..21e4fab 100644
--- a/include/arch/ppc/processor.h
+++ b/include/arch/ppc/processor.h
@@ -425,6 +425,24 @@ static inline void mtmsr(unsigned long msr)
 #endif
 }
 
+#ifdef __powerpc64__
+#define SDR1_HTABORG_MASK 0x3FFFFFFFFFFC0000UL
+#else
+#define SDR1_HTABORG_MASK 0xffff0000
+#endif
+
+static inline unsigned long mfsdr1(void)
+{
+    unsigned long sdr1;
+    asm volatile("mfsdr1 %0" : "=r" (sdr1));
+    return sdr1;
+}
+
+static inline void mtsdr1(unsigned long sdr1)
+{
+    asm volatile("mtsdr1 %0" :: "r" (sdr1));
+}
+
 static inline unsigned int mfpvr(void)
 {
     unsigned int pvr;
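
As a usage sketch for the new accessors (illustrative only; hash_table_base() simply mirrors get_hash_base() from the patch, and hash_table_size() is a hypothetical helper reconstructing the table size from SDR1, not part of the patch):

    static inline unsigned long hash_table_base(void)
    {
        return mfsdr1() & SDR1_HTABORG_MASK;
    }

    static inline unsigned long hash_table_size(void)
    {
    #ifdef __powerpc64__
        /* 64-bit: 256 KiB minimum, scaled up by HTABSIZE (low 5 bits of SDR1) */
        return 1UL << (18 + (mfsdr1() & 0x1f));
    #else
        /* 32-bit: 64 KiB minimum, scaled up by HTABMASK (low 9 bits of SDR1) */
        return (((mfsdr1() & 0x1ff) << 16) | 0xffff) + 1;
    #endif
    }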