/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}

static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}

static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC want this. */
	return 0;
}
#endif
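/*
 * Worked example (illustrative values): with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
 * set to 2, scratchpad_offset(0) = 2*128 - 8 - 32768 = -32520, i.e. the
 * first kernel slot is the topmost 8 bytes of the two reserved cache
 * lines, addressed as a negative offset from register $0.
 */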
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values.  Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
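/*
 * Each C0_* macro above expands to a "register, select" pair, so e.g.
 * uasm_i_mfc0(p, ptr, C0_TCBIND) reads CP0 register 2, select 2.
 */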
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;

static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}

static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode { not_refill, refill_scratch, refill_noscratch };
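/*
 * Illustrative example: if the CPU reports kscratch_mask == 0xc (the
 * KScratch registers at selects 2 and 3 usable) and nothing is allocated
 * yet, then ffs(0xc) == 3, so allocate_kscratch() returns 2 and marks
 * bit 2 used; a second call returns 3, and a third returns -1.
 */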
static struct work_registers __cpuinit build_get_work_registers(u32 **p)
{
	struct work_registers r;

	int smp_processor_id_reg;
	int smp_processor_id_sel;
	int smp_processor_id_shift;

	if (scratch_reg > 0) {
		/* Save in CPU local C0_KScratch? */
		UASM_i_MTC0(p, 1, 31, scratch_reg);
		r.r1 = K0;
		r.r2 = K1;
		r.r3 = 1;
		return r;
	}

	if (num_possible_cpus() > 1) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
		smp_processor_id_shift = 51;
		smp_processor_id_reg = 20; /* XContext */
		smp_processor_id_sel = 0;
#else
# ifdef CONFIG_32BIT
		smp_processor_id_shift = 25;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
# ifdef CONFIG_64BIT
		smp_processor_id_shift = 26;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
#endif
		/* Get smp_processor_id */
		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);

		/* handler_reg_save index in K0 */
		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));

		UASM_i_LA(p, K1, (long)&handler_reg_save);
		UASM_i_ADDU(p, K0, K0, K1);
	} else {
		UASM_i_LA(p, K0, (long)&handler_reg_save);
	}
	/* K0 now points to save area, save $1 and $2  */
	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);

	r.r1 = K1;
	r.r2 = 1;
	r.r3 = 2;
	return r;
}

static void __cpuinit build_restore_work_registers(u32 **p)
{
	if (scratch_reg > 0) {
		UASM_i_MFC0(p, 1, 31, scratch_reg);
		return;
	}
	/* K0 already points to save area, restore $1 and $2  */
	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}
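/*
 * Strategy above: the handlers need three work registers.  With a free
 * KScratch register, $1 (AT) is parked there and {K0, K1, $1} handed
 * back directly.  Without one, $1 and $2 are spilled to a per-CPU slot
 * in handler_reg_save, located by extracting the CPU number that this
 * kernel keeps in the (X)Context register.
 */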
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * we cannot do r3000 under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd
 */
extern unsigned long pgd_current[];

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
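/*
 * Reading the handler above: "srl 22; sll 2" turns BadVAddr into a byte
 * index into the 1024-entry PGD (each entry maps 4 MB), while the
 * R3000's pre-scaled Context register supplies the PTE byte offset via
 * "andi 0xffc".  The odd interleaving exists purely to hide the R3000's
 * cp0 and load delay slots.
 */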
/*
 * The R4000 TLB handler is much more complicated.  We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
	case CPU_XLR:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
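/*
 * A note on the "bgezl $0" trick in the R4000/R4400 and Nevada cases
 * above: it is an always-taken branch-likely whose delay slot holds the
 * tlbw itself.  The branch instruction occupies the mtc0 hazard slot,
 * and the pipeline refill caused by the taken branch stands in for the
 * nops otherwise needed after the tlbw.
 */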
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}
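/*
 * Why the SRL + ROTR pair in the RIXI case: the software PTE keeps the
 * no-exec/no-read bits below _PAGE_GLOBAL, in bit positions a plain
 * shift would simply discard.  Shifting down to _PAGE_NO_EXEC and then
 * rotating by the remaining distance drops the low flag bits while
 * wrapping the RI/XI bits into the top of EntryLo, where the hardware
 * expects them.
 */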
#ifdef CONFIG_HUGETLB_PAGE

static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid,
					     int restore_scratch)
{
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}
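/*
 * The two halves above differ only in instruction order: when a scratch
 * register must be restored, the mtc0 is emitted before the branch so
 * the restore instruction can sit in the branch delay slot; otherwise
 * the final mtc0 itself fills the delay slot.
 */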
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode,
						 int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
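/*
 * Worked example (assuming 2 MB huge pages): each EntryLo maps half the
 * huge page, and EntryLo encodes the physical address shifted right by
 * 6, so entrylo1 = entrylo0 + ((HPAGE_SIZE / 2) >> 6)
 * = entrylo0 + (HPAGE_SIZE >> 7) = entrylo0 + 0x4000.  That fits a
 * 16-bit immediate, so small_sequence holds and one ADDIU suffices.
 */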
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0	1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}
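/*
 * On the CONFIG_MIPS_PGD_C0_CONTEXT path above, the per-CPU PGD pointer
 * is parked in the upper bits of c0_Context as (&pgd << 11).  dins
 * clears the low 23 bits; "ori 0x540; drotr 11" then rotates the value
 * back down by 11 while the rotated-in constant lands as the 0xa8
 * XKPHYS-cached prefix in the top bits, recovering a directly usable
 * kernel virtual address in three instructions.
 */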
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled so would generate
		 * (address errors?), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
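/*
 * Worked example (assuming 32-bit PTEs and 4 KB pages): the hardware
 * deposits BadVPN2 at bit 4 of Context, i.e. pre-scaled for 16-byte
 * PTE pairs.  With PTE_T_LOG2 == 2 a pair is only 8 bytes, so
 * shift = 4 - 3 + 12 - 12 = 1 rescales it, and the mask keeps exactly
 * the pair-offset bits within the PTE page.
 */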
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

struct mips_huge_tlb_info {
	int huge_pte;
	int restore_scratch;
};

static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler(u32 **p, struct uasm_label **l,
			      struct uasm_reloc **r, unsigned int tmp,
			      unsigned int ptr, int c0_scratch)
{
	struct mips_huge_tlb_info rv;
	unsigned int even, odd;
	int vmalloc_branch_delay_filled = 0;
	const int scratch = 1; /* Our extra working register */

	rv.huge_pte = scratch;
	rv.restore_scratch = 0;

	if (check_for_high_segbits) {
		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		uasm_i_dsrl_safe(p, scratch, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, scratch, label_vmalloc);

		if (pgd_reg == -1) {
			vmalloc_branch_delay_filled = 1;
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0	1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *			   tmp		ptr
	 * fall-through case =	 badvaddr  *pgd_current
	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_HUGETLB_PAGE
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * In the LWX case we don't want to do the load in the
	 * delay slot.	It cannot issue in the same cycle and may be
	 * speculative and unneeded.
	 */
	if (use_lwx_insns())
		uasm_i_nop(p);
#endif /* CONFIG_HUGETLB_PAGE */

	/* build_update_entries */
	if (use_lwx_insns()) {
		even = ptr;
		odd = tmp;
		UASM_i_LWX(p, even, scratch, tmp);
		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
		UASM_i_LWX(p, odd, scratch, tmp);
	} else {
		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
		even = tmp;
		odd = ptr;
		UASM_i_LW(p, even, 0, ptr); /* get even pte */
		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
	}
	if (kernel_uses_smartmips_rixi) {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
		uasm_i_drotr(p, even, even,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_drotr(p, odd, odd,
			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
	}
	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

	if (c0_scratch >= 0) {
		UASM_i_MFC0(p, scratch, 31, c0_scratch);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
	} else {
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	}

	uasm_i_eret(p); /* return from trap */

	return rv;
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler ( void )
2005-04-16 15:20:36 -07:00
{
u32 * p = tlb_handler ;
2008-01-28 20:05:38 +00:00
struct uasm_label * l = labels ;
struct uasm_reloc * r = relocs ;
2005-04-16 15:20:36 -07:00
u32 * f ;
unsigned int final_len ;
2011-03-29 10:54:54 +02:00
struct mips_huge_tlb_info htlb_info __maybe_unused ;
enum vmalloc64_mode vmalloc_mode __maybe_unused ;
2005-04-16 15:20:36 -07:00
memset ( tlb_handler , 0 , sizeof ( tlb_handler ) ) ;
memset ( labels , 0 , sizeof ( labels ) ) ;
memset ( relocs , 0 , sizeof ( relocs ) ) ;
memset ( final_handler , 0 , sizeof ( final_handler ) ) ;
2010-12-27 18:07:57 -08:00
if ( ( scratch_reg > 0 | | scratchpad_available ( ) ) & & use_bbit_insns ( ) ) {
htlb_info = build_fast_tlb_refill_handler ( & p , & l , & r , K0 , K1 ,
scratch_reg ) ;
vmalloc_mode = refill_scratch ;
} else {
htlb_info . huge_pte = K0 ;
htlb_info . restore_scratch = 0 ;
vmalloc_mode = refill_noscratch ;
/*
* create the plain linear handler
*/
if ( bcm1250_m3_war ( ) ) {
unsigned int segbits = 44 ;
uasm_i_dmfc0 ( & p , K0 , C0_BADVADDR ) ;
uasm_i_dmfc0 ( & p , K1 , C0_ENTRYHI ) ;
uasm_i_xor ( & p , K0 , K0 , K1 ) ;
uasm_i_dsrl_safe ( & p , K1 , K0 , 62 ) ;
uasm_i_dsrl_safe ( & p , K0 , K0 , 12 + 1 ) ;
uasm_i_dsll_safe ( & p , K0 , K0 , 64 + 12 + 1 - segbits ) ;
uasm_i_or ( & p , K0 , K0 , K1 ) ;
uasm_il_bnez ( & p , & r , K0 , label_leave ) ;
/* No need for uasm_i_nop */
}
2005-04-16 15:20:36 -07:00
2005-09-03 15:56:16 -07:00
# ifdef CONFIG_64BIT
2010-12-27 18:07:57 -08:00
build_get_pmde64 ( & p , & l , & r , K0 , K1 ) ; /* get pmd in K1 */
2005-04-16 15:20:36 -07:00
# else
2010-12-27 18:07:57 -08:00
build_get_pgde32 ( & p , K0 , K1 ) ; /* get pgd in K1 */
2005-04-16 15:20:36 -07:00
# endif
2009-05-27 17:47:44 -07:00
# ifdef CONFIG_HUGETLB_PAGE
2010-12-27 18:07:57 -08:00
build_is_huge_pte ( & p , & r , K0 , K1 , label_tlb_huge_update ) ;
2009-05-27 17:47:44 -07:00
# endif
2010-12-27 18:07:57 -08:00
build_get_ptep ( & p , K0 , K1 ) ;
build_update_entries ( & p , K0 , K1 ) ;
build_tlb_write_entry ( & p , & l , & r , tlb_random ) ;
uasm_l_leave ( & l , p ) ;
uasm_i_eret ( & p ) ; /* return from trap */
}
2009-05-27 17:47:44 -07:00
# ifdef CONFIG_HUGETLB_PAGE
uasm_l_tlb_huge_update ( & l , p ) ;
2010-12-27 18:07:57 -08:00
build_huge_update_entries ( & p , htlb_info . huge_pte , K1 ) ;
build_huge_tlb_write_entry ( & p , & l , & r , K0 , tlb_random ,
htlb_info . restore_scratch ) ;
2009-05-27 17:47:44 -07:00
# endif
2005-09-03 15:56:16 -07:00
# ifdef CONFIG_64BIT
2010-12-27 18:07:57 -08:00
build_get_pgd_vmalloc64 ( & p , & l , & r , K0 , K1 , vmalloc_mode ) ;
2005-04-16 15:20:36 -07:00
# endif
/*
* Overflow check : For the 64 bit handler , we need at least one
* free instruction slot for the wrap - around branch . In worst
* case , if the intended insertion point is a delay slot , we
2006-10-03 22:21:02 +02:00
* need three , with the second nop ' ed and the third being
2005-04-16 15:20:36 -07:00
* unused .
*/
2007-06-06 14:52:43 +08:00
/* Loongson2 ebase is different than r4k, we have more space */
# if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
2005-04-16 15:20:36 -07:00
if ( ( p - tlb_handler ) > 64 )
panic ( " TLB refill handler space exceeded " ) ;
# else
2009-05-20 11:40:58 -07:00
if ( ( ( p - tlb_handler ) > ( MIPS64_REFILL_INSNS * 2 ) - 1 )
| | ( ( ( p - tlb_handler ) > ( MIPS64_REFILL_INSNS * 2 ) - 3 )
& & uasm_insn_has_bdelay ( relocs ,
tlb_handler + MIPS64_REFILL_INSNS - 3 ) ) )
2005-04-16 15:20:36 -07:00
panic ( " TLB refill handler space exceeded " ) ;
# endif
/*
* Now fold the handler in the TLB refill handler space .
*/
2007-06-06 14:52:43 +08:00
# if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
2005-04-16 15:20:36 -07:00
f = final_handler ;
/* Simplest case, just copy the handler. */
2008-01-28 20:05:38 +00:00
uasm_copy_handler ( relocs , labels , tlb_handler , p , f ) ;
2005-04-16 15:20:36 -07:00
final_len = p - tlb_handler ;
2005-09-03 15:56:16 -07:00
# else /* CONFIG_64BIT */
2009-05-20 11:40:58 -07:00
f = final_handler + MIPS64_REFILL_INSNS ;
if ( ( p - tlb_handler ) < = MIPS64_REFILL_INSNS ) {
2005-04-16 15:20:36 -07:00
/* Just copy the handler. */
2008-01-28 20:05:38 +00:00
uasm_copy_handler ( relocs , labels , tlb_handler , p , f ) ;
2005-04-16 15:20:36 -07:00
final_len = p - tlb_handler ;
} else {
2009-05-27 17:47:44 -07:00
# if defined(CONFIG_HUGETLB_PAGE)
const enum label_id ls = label_tlb_huge_update ;
2009-05-20 11:40:59 -07:00
# else
const enum label_id ls = label_vmalloc ;
# endif
u32 * split ;
int ov = 0 ;
int i ;
for ( i = 0 ; i < ARRAY_SIZE ( labels ) & & labels [ i ] . lab ! = ls ; i + + )
;
BUG_ON ( i = = ARRAY_SIZE ( labels ) ) ;
split = labels [ i ] . addr ;
2005-04-16 15:20:36 -07:00
/*
2009-05-20 11:40:59 -07:00
* See if we have overflown one way or the other .
2005-04-16 15:20:36 -07:00
*/
2009-05-20 11:40:59 -07:00
if ( split > tlb_handler + MIPS64_REFILL_INSNS | |
split < p - MIPS64_REFILL_INSNS )
ov = 1 ;
if ( ov ) {
/*
* Split two instructions before the end . One
* for the branch and one for the instruction
* in the delay slot .
*/
split = tlb_handler + MIPS64_REFILL_INSNS - 2 ;
/*
* If the branch would fall in a delay slot ,
* we must back up an additional instruction
* so that it is no longer in a delay slot .
*/
if ( uasm_insn_has_bdelay ( relocs , split - 1 ) )
split - - ;
}
2005-04-16 15:20:36 -07:00
/* Copy first part of the handler. */
2008-01-28 20:05:38 +00:00
uasm_copy_handler ( relocs , labels , tlb_handler , split , f ) ;
2005-04-16 15:20:36 -07:00
f + = split - tlb_handler ;
2009-05-20 11:40:59 -07:00
if ( ov ) {
/* Insert branch. */
uasm_l_split ( & l , final_handler ) ;
uasm_il_b ( & f , & r , label_split ) ;
if ( uasm_insn_has_bdelay ( relocs , split ) )
uasm_i_nop ( & f ) ;
else {
uasm_copy_handler ( relocs , labels ,
split , split + 1 , f ) ;
uasm_move_labels ( labels , f , f + 1 , - 1 ) ;
f + + ;
split + + ;
}
2005-04-16 15:20:36 -07:00
}
/* Copy the rest of the handler. */
2008-01-28 20:05:38 +00:00
uasm_copy_handler ( relocs , labels , split , p , final_handler ) ;
2009-05-20 11:40:58 -07:00
final_len = ( f - ( final_handler + MIPS64_REFILL_INSNS ) ) +
( p - split ) ;
2005-04-16 15:20:36 -07:00
}
2005-09-03 15:56:16 -07:00
# endif /* CONFIG_64BIT */
2005-04-16 15:20:36 -07:00
2008-01-28 20:05:38 +00:00
uasm_resolve_relocs ( relocs , labels ) ;
pr_debug ( " Wrote TLB refill handler (%u instructions). \n " ,
final_len ) ;
2005-04-16 15:20:36 -07:00
2006-03-29 18:53:00 +01:00
memcpy ( ( void * ) ebase , final_handler , 0x100 ) ;
2007-10-18 09:11:17 +02:00
dump_handler ( ( u32 * ) ebase , 64 ) ;
2005-04-16 15:20:36 -07:00
}
/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;

static void __cpuinit build_r4000_setup_pgd(void)
{
	const int a0 = 4;
	const int a1 = 5;
	u32 *p = tlbmiss_handler_setup_pgd;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	pgd_reg = allocate_kscratch();

	if (pgd_reg == -1) {
		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 */
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, 31, pgd_reg);
	}
	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
		panic("tlbmiss_handler_setup_pgd space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler(tlbmiss_handler_setup_pgd,
		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
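/*
 * Worked example for the no-KScratch path above (illustrative address):
 * a swapper_pg_dir at CKSEG0 0xffffffff81000000 gives sra(29) == -4,
 * so adding 4 yields 0 and we fall through; dinsm then zeroes bits
 * 29..63, leaving the physical address 0x01000000, which is shifted up
 * by 11 and dropped into c0_Context for the refill handler to recover.
 */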
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
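/*
 * iPTE_SW pairs with the ll/lld done by iPTE_LW: on SMP the or-in of
 * the new mode bits is published with sc/scd, and a failed store
 * conditional branches back to label_smp_pgtable_change to retry the
 * whole read-modify-write.  The !cpu_has_64bits 64-bit-phys variant
 * additionally updates the hardware half of the split PTE with a
 * second ll/sc sequence.
 */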
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	if (kernel_uses_smartmips_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-(*/
				iPTE_LW(p, pte, ptr);
		}
	} else {
		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-(*/
			iPTE_LW(p, pte, ptr);
	}
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
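/*
 * The andi/xori pairs in build_pte_present() above and
 * build_pte_writable() below test two bits at once: andi isolates the
 * required bits, xori flips them, so the result is nonzero, and the
 * bnez taken, exactly when at least one required bit was clear.
 */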
/*
* Check if PTE can be written to , if not branch to LABEL . Regardless
* restore PTE with value from PTR when done .
*/
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-( */
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}
/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;

		uasm_i_andi(p, t, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */

	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
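
/*
 * The head below walks the two-level R3000 page table for the
 * faulting address and probes the TLB, filling the load and cp0
 * delay slots with useful work. With pte in k0 and ptr in k1 (as the
 * callers pass them), the generated code is roughly:
 *
 *	mfc0	k0, c0_badvaddr
 *	lui	k1, %hi(pgd_current)
 *	lw	k1, %lo(pgd_current)(k1)
 *	srl	k0, k0, 22		# pgd index (4MB per slot)
 *	sll	k0, k0, 2
 *	addu	k1, k1, k0
 *	mfc0	k0, c0_context
 *	lw	k1, 0(k1)
 *	andi	k0, k0, 0xffc		# pte offset from BadVPN
 *	addu	k1, k1, k0
 *	lw	k0, 0(k1)
 *	tlbp
 */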
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */
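/*
 * These handlers work from a set of registers handed out by
 * build_get_work_registers(); depending on the CPU these may be
 * backed by a scratch register, scratchpad memory or plain k0/k1.
 */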
static struct work_registers __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
	UASM_i_LW(p, wr.r2, 0, wr.r2);
	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
	return wr;
}
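
/*
 * The tail first rounds PTR down to the even pte of its even/odd
 * pair (or-ing and then xor-ing with sizeof(pte_t) clears that bit)
 * so build_update_entries can load both halves for ENTRYLO0/1.
 */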
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	build_restore_work_registers(p);
	uasm_i_eret(p); /* return from trap */
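
	/*
	 * Kernel (vmalloc) addresses take a branch out of the fast path
	 * above; the fixup code is emitted here, past the eret, so the
	 * fast path itself stays straight-line.
	 */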
#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}
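	/*
	 * The workaround above compares BadVAddr against EntryHi while
	 * ignoring the low 13 bits and the unimplemented bits between
	 * segbits and the region field; a mismatch identifies the
	 * exception as spurious (BCM1250 M3 erratum) and it is simply
	 * dismissed via label_leave.
	 */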

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);
		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
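		/*
		 * The branch offset of 8 above skips exactly one
		 * instruction: for an even pte the ENTRYLO0 read sits
		 * in the delay slot and the ENTRYLO1 read is jumped
		 * over; for an odd pte both execute and ENTRYLO1 wins.
		 */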
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);
	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);
		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
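
		/*
		 * When PM_DEFAULT_MASK is zero, build_restore_pagemask
		 * below is expected to begin with a branch, which must
		 * not end up in the delay slot of the branch above;
		 * pad it with a nop in that case.
		 */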
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif
	uasm_l_nopage_tlbl(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbs(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbm(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif
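	/*
	 * check_for_high_segbits flags CPUs that implement more virtual
	 * address bits than a single pgd can map (PGDIR_SHIFT +
	 * PGD_ORDER + PAGE_SHIFT - 3 bits); the generated handlers then
	 * have to reject the out-of-range segments up front.
	 */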
	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
			scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}
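
/*
 * The handlers were synthesized with normal cached stores; make them
 * visible to instruction fetch before first use.
 */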
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
#endif
}