[OpenBIOS] [PATCH v4] ppc: SDR1 fixes

Alexander Graf agraf at suse.de
Sun Nov 21 22:53:35 CET 2010


On 21.11.2010, at 22:40, Andreas Färber <andreas.faerber at web.de> wrote:

> Am 21.11.2010 um 22:22 schrieb Alexander Graf:
> 
>> On 21.11.2010, at 21:08, Andreas Färber wrote:
>> 
>>> Clean up use of SDR1 fields.
>>> 
>>> IBM's "The Programming Environments for 32-Bit Microprocessors":
>>> The HTABMASK field in SDR1 contains a mask value that determines
>>> how many bits from the hash are used in the page table index. This mask
>>> must be of the form 0b00...011...1; that is, a string of 0 bits followed by
>>> a string of 1 bits. The 1 bits determine how many additional bits (at least 10)
>>> from the hash are used in the index; HTABORG must have this same number of
>>> low-order bits equal to 0.
>>> 
>>> IBM's "The Programming Environments Manual for 64-bit Microprocessors" 3.0:
>>> The HTABSIZE field in SDR1 contains an integer value that determines
>>> how many bits from the hash are used in the page table index. This number
>>> must not exceed 28. HTABSIZE is used to generate a mask of the form
>>> 0b00...011...1; that is, a string of 0 bits followed by a string of 1-bits.
>>> The 1-bits determine how many additional bits (beyond the minimum of 11)
>>> from the hash are used in the index. The HTABORG must have this same number
>>> of low-order bits equal to 0.
>>> 
>>> Adjust alignment mask and hash size for ppc64.
>>> 
>>> Note that the page table does not yet scale with the memory size, and
>>> the secondary hash is not yet used.
>>> 
>>> v4:
>>> * Don't redefine hash_page_{32,64} but add TODO comments for renaming.
>>> * Keep and simplify hash_page().
>>> 
>>> v3:
>>> * Fix shifts. Thanks to Segher and Alex.
>>> * Fix HTABSIZE for 32-bit ppc64: ((2 << 15) - 1) >> 16 happened to be 0,
>>> so that it worked as expected. Avoid underflow, spotted by Alex.
>>> Make this fix available for legacy 64-bit support as well.
>>> * Drop get_hash_size(). It was only used in hash_page_32() and caused
>>> unnecessary conversions between HTABMASK and size.
>>> * Introduce {mfsdr1,mtsdr1} as suggested by Alex.
>>> Note that this changes the volatility for hash_page_64().
>>> * Conditionalize 64-bit support, drop 32-bit support for ppc64.
>>> * Adjust hash size for ppc64.
>>> 
>>> v2:
>>> * Use HTABSIZE for ppc64, fix HTABMASK usage for ppc.
>>> Error spotted by Segher.
>>> 
>>> Cc: Alexander Graf <agraf at suse.de>
>>> Cc: Segher Boessenkool <segher at kernel.crashing.org>
>>> Signed-off-by: Andreas Färber <andreas.faerber at web.de>
>>> ---
>>> arch/ppc/qemu/ofmem.c        |   69 ++++++++++++++++++++++++++++-------------
>>> include/arch/ppc/processor.h |   12 +++++++
>>> 2 files changed, 59 insertions(+), 22 deletions(-)
>>> 
>>> diff --git a/arch/ppc/qemu/ofmem.c b/arch/ppc/qemu/ofmem.c
>>> index 72694b3..6f3079a 100644
>>> --- a/arch/ppc/qemu/ofmem.c
>>> +++ b/arch/ppc/qemu/ofmem.c
>>> @@ -48,7 +48,12 @@ extern void setup_mmu( unsigned long code_base );
>>> #define OF_CODE_START    0xfff00000UL
>>> #define IO_BASE            0x80000000
>>> 
>>> -#define HASH_SIZE        (2 << 15)
>>> +#ifdef __powerpc64__
>>> +#define HASH_BITS        18
>>> +#else
>>> +#define HASH_BITS        15
>>> +#endif
>>> +#define HASH_SIZE        (2 << HASH_BITS)
>>> #define OFMEM_SIZE        (2 * 1024 * 1024)
>>> 
>>> #define    SEGR_USER        BIT(2)
>>> @@ -57,21 +62,11 @@ extern void setup_mmu( unsigned long code_base );
>>> static inline unsigned long
>>> get_hash_base( void )
>>> {
>>> -    unsigned long sdr1;
>>> -
>>> -    asm volatile("mfsdr1 %0" : "=r" (sdr1) );
>>> -
>>> -    return (sdr1 & 0xffff0000);
>>> -}
>>> -
>>> -static inline unsigned long
>>> -get_hash_size( void )
>>> -{
>>> -    unsigned long sdr1;
>>> -
>>> -    asm volatile("mfsdr1 %0" : "=r" (sdr1) );
>>> -
>>> -    return ((sdr1 << 16) | 0x0000ffff) + 1;
>>> +#ifdef __powerpc64__
>>> +    return (mfsdr1() & 0x3FFFFFFFFFFC0000UL);
>> 
>> Just define these as constants and keep the actual code clean :).
> 
> OK.
> 
>>> +#else
>>> +    return (mfsdr1() & 0xffff0000UL);
>>> +#endif
>>> }
>>> 
>>> static inline unsigned long
>>> @@ -228,6 +223,8 @@ ea_to_phys( ucell ea, ucell *mode )
>>>    return phys;
>>> }
>>> 
>>> +#if defined(__powerpc64__) || defined(CONFIG_PPC_64BITSUPPORT)
>>> +
>>> static void
>>> hash_page_64( ucell ea, ucell phys, ucell mode )
>>> {
>>> @@ -242,7 +239,7 @@ hash_page_64( ucell ea, ucell phys, ucell mode )
>>>    vsid = (ea >> 28) + SEGR_BASE;
>>>    vsid_sh = 7;
>>>    vsid_mask = 0x00003FFFFFFFFF80ULL;
>>> -    asm ( "mfsdr1 %0" : "=r" (sdr) );
>>> +    sdr = mfsdr1();
>>>    sdr_sh = 18;
>>>    sdr_mask = 0x3FF80;
>>>    page_mask = 0x0FFFFFFF; // XXX correct?
>>> @@ -294,6 +291,12 @@ hash_page_64( ucell ea, ucell phys, ucell mode )
>>>    asm volatile( "tlbie %0"  :: "r"(ea) );
>>> }
>>> 
>>> +#endif /* ppc64 */
>>> +#ifndef __powerpc64__
>>> +/* XXX Turn this into an #else when legacy 64-bit support is gone
>>> + *     and rename hash_page{32,64} to hash_page.
>> 
>> Just keep them around. They shouldn't hurt. The code will be optimized away by the compiler anyways if CONFIG_PPC_64BITSUPPORT is not set, as static functions that never get called (if, guarded by a constant 0, a function can never get called) just don't get compiled in.
> 
> They do hurt in that not calling them leads to an error.

Nope.

static void x(void)
{
}

This breaks:

#if 0
x();
#endif

This works:

if (0)
    x();

So if you keep the call to the 32 bit stuff in compiled code that gets optimized out because it can never be called, everybody is happy :).

> 
>>> + */
>>> +
>>> static void
>>> hash_page_32( ucell ea, ucell phys, ucell mode )
>>> {
>>> @@ -307,7 +310,7 @@ hash_page_32( ucell ea, ucell phys, ucell mode )
>>> 
>>>    hash1 = vsid;
>>>    hash1 ^= (ea >> 12) & 0xffff;
>>> -    hash1 &= (get_hash_size() - 1) >> 6;
>>> +    hash1 &= (((mfsdr1() & 0x1ff) << 16) | 0xffff) >> 6;
>>> 
>>>    pp = (mPTE_t*)(get_hash_base() + (hash1 << 6));
>>>    upte = (unsigned long*)pp;
>>> @@ -334,6 +337,9 @@ hash_page_32( ucell ea, ucell phys, ucell mode )
>>>    asm volatile( "tlbie %0"  :: "r"(ea) );
>>> }
>>> 
>>> +#endif /* ppc */
>>> +#ifdef CONFIG_PPC_64BITSUPPORT
>>> +
>>> static int is_ppc64(void)
>>> {
>>>    unsigned int pvr;
>>> @@ -342,12 +348,21 @@ static int is_ppc64(void)
>>>    return ((pvr >= 0x330000) && (pvr < 0x70330000));
>>> }
>>> 
>>> +#endif /* CONFIG_PPC_64BITSUPPORT */
>> 
>> 
>> static int is_ppc64(void)
>> {
>> #if defined(__powerpc64__)
>>   return 1;
>> #elif defined(CONFIG_PPC_64BITSUPPORT)
>>   unsigned int pvr = mfpvr();
>>   return ((pvr >= 0x330000) && (pvr < 0x70330000));
>> #else
>>   return 0;
>> #endif
>> }
>> 
>>> +
>>> +/* XXX Drop this function when legacy 64-bit support is dropped. */
>>> static void hash_page( unsigned long ea, unsigned long phys, ucell mode )
>>> {
>>> +#ifdef __powerpc64__
>>> +    hash_page_64(ea, phys, mode);
>>> +#else
>>> +#ifdef CONFIG_PPC_64BITSUPPORT
>>>    if ( is_ppc64() )
>>>        hash_page_64(ea, phys, mode);
>>>    else
>>> +#endif
>>>        hash_page_32(ea, phys, mode);
>>> +#endif
>> 
>>   if (is_ppc64())
>>       hash_page_64(ea, phys, mode);
>>   else
>>       hash_page_32(ea, phys, mode);
>> 
>>> }
>>> 
>>> void
>>> @@ -387,17 +402,27 @@ void
>>> setup_mmu( unsigned long ramsize )
>>> {
>>>    ofmem_t *ofmem;
>>> -    unsigned long sdr1, sr_base;
>>> +    unsigned long sr_base;
>>>    unsigned long hash_base;
>>> -    unsigned long hash_mask = 0xfff00000; /* alignment for ppc64 */
>>> +    unsigned long hash_mask = ~0x000fffffUL; /* alignment for ppc64 */
>>>    int i;
>>> 
>>>    /* SDR1: Storage Description Register 1 */
>>> 
>>>    hash_base = (ramsize - 0x00100000 - HASH_SIZE) & hash_mask;
>>>    memset((void *)hash_base, 0, HASH_SIZE);
>>> -    sdr1 = hash_base | ((HASH_SIZE-1) >> 16);
>>> -    asm volatile("mtsdr1 %0" :: "r" (sdr1) );
>>> +#if defined(__powerpc64__) || defined(CONFIG_PPC_64BITSUPPORT)
>>> +#ifdef CONFIG_PPC_64BITSUPPORT
>>> +    if (is_ppc64())
>>> +#endif
>>> +        mtsdr1(hash_base | MAX(HASH_BITS - 18, 0));
>>> +#ifdef CONFIG_PPC_64BITSUPPORT
>>> +    else
>>> +#endif
>>> +#endif
>>> +#ifndef __powerpc64__
>>> +        mtsdr1(hash_base | ((HASH_SIZE - 1) >> 16));
>>> +#endif
>>> 
>> 
>>   if (is_ppc64())
>>       mtsdr1(hash_base | MAX(HASH_BITS - 18, 0));
>>   else
>>       mtsdr1(hash_base | ((HASH_SIZE - 1) >> 16));
>> 
>> 
>> That's a lot easier to read, no?
> 
> So is
> 
> #ifdef __powerpc64__
>    mtsdr1(...);
> #else
>    mtsdr1(...);
> #endif
> 
> which I'm trying to reach.
> 
> In the SLB patch for comparison the final
> 
> #ifdef __powerpc64__
>    ...
> #else
>    ...
> #endif
> 
> code is so much nicer than
>    if (is_ppc64())
>        ...
>    else
>        ...
> 
> which requires a lot of reindentation to remain readable - reindentation that would later be reverted again when legacy support is dropped. Trying to avoid that.

For in total 10 lines of code?

Why are you trying so hard to get rid of legacy ppc64 compat? It doesn't hurt to have it around as long as you keep the #ifdefs to a reasonable minimum :). By then it's actually nice to have for code check reasons: compile ppc64 and be reasonably sure you don't break ppc32.


Alex

> 



More information about the OpenBIOS mailing list