/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}
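
/* Build the ESID (even) doubleword of an SLB entry: the effective
 * segment ID of "ea" masked for the segment size, the valid bit, and
 * the index of the slot the entry will occupy. */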
2005-04-17 02:20:36 +04:00
2007-10-11 14:37:10 +04:00
static inline unsigned long mk_esid_data ( unsigned long ea , int ssize ,
unsigned long slot )
2005-04-17 02:20:36 +04:00
{
2007-10-11 14:37:10 +04:00
unsigned long mask ;
mask = ( ssize = = MMU_SEGSIZE_256M ) ? ESID_MASK : ESID_MASK_1T ;
return ( ea & mask ) | SLB_ESID_V | slot ;
2005-04-17 02:20:36 +04:00
}
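
/* slb_vsid_shift() picks the VSID field shift for 256M vs 1T segments;
 * mk_vsid_data() builds the VSID (odd) doubleword of an SLB entry from
 * the kernel VSID for "ea", the protection/LLP flags and the
 * segment-size encoding. */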
#define slb_vsid_shift(ssize)   \
        ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}
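
/* Install a bolted SLB entry: record it in the SLB shadow buffer first,
 * then write it into the hardware SLB with slbmte. */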
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}
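
/* Flush the SLB and re-create the bolted kernel entries.  slbia does
 * not touch SLB entry 0 (the kernel linear mapping), so only the
 * VMALLOC entry (slot 1) and the kernel stack entry (slot 2) are
 * rewritten here. */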
void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}
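
/* Refresh the bolted VMALLOC entry (slot 1) with the current vmalloc
 * segment page-size flags, then flush and rebolt the SLB. */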
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4 +
                           (unsigned long)insn_addr);
}
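
/* Boot-time SLB setup: patch the SLB miss handler with the page-size
 * encodings for the kernel linear and I/O regions, invalidate the SLB,
 * and create the bolted kernel linear (slot 0) and VMALLOC (slot 1)
 * entries. */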
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);

                DBG("SLB: linear  LLP = %04x\n", linear_llp);
                DBG("SLB: io      LLP = %04x\n", io_llp);
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");

        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment.  By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
}