/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me a away haha
 * they're coming to take me a away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

#include "uasm.h"

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
#ifdef MODULE_START
	label_module_alloc,
#endif
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
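
/*
 * Each C0_* definition above expands to a "register, select" pair; the
 * uasm MFC0/MTC0 helpers consume the two values as separate macro
 * arguments.
 */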

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}

/*
 * The R4000 TLB handler is much more complicated.  We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction.  This is not really correct as the stalling
 * instruction can modify the address used to access the JTLB.  The failure
 * symptom is that the TLBP instruction will use an address created for the
 * stalling instruction and not the address held in C0_ENHI and thus report
 * the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the
 * TLBP to stall - make it an NOP or some other instruction guaranteed not
 * to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
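
/*
 * Illustration only, not emitted verbatim (build_tlb_probe_entry below
 * generates the real sequence): on an affected CPU the workaround turns
 *
 *	lw	k1, 0(k1)	# may stall
 *	tlbp
 *
 * into
 *
 *	lw	k1, 0(k1)
 *	nop			# guaranteed not to stall
 *	tlbp
 */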

static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BCM3302:
	case CPU_BCM4710:
	case CPU_LOONGSON2:
	case CPU_BCM6338:
	case CPU_BCM6345:
	case CPU_BCM6348:
	case CPU_BCM6358:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

#ifdef CONFIG_HUGETLB_PAGE
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
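	/*
	 * Worked example, assuming a 2 MB huge page (HPAGE_SIZE == 1 << 21):
	 * entrylo0 and entrylo1 each map 1 MB, and their values differ by
	 * that 1 MB expressed in entrylo units, i.e. HPAGE_SIZE >> 7 ==
	 * 0x4000.  That fits in an ADDIU immediate, so small_sequence is
	 * true below and the tmp register is not needed.
	 */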
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_il_bltz(p, r, tmp, label_vmalloc);
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	uasm_l_vmalloc(l, *p);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context ( u32 * * p , unsigned int ctx )
2005-04-17 02:20:36 +04:00
{
2006-10-24 05:29:01 +04:00
unsigned int shift = 4 - ( PTE_T_LOG2 + 1 ) + PAGE_SHIFT - 12 ;
2005-04-17 02:20:36 +04:00
unsigned int mask = ( PTRS_PER_PTE / 2 - 1 ) < < ( PTE_T_LOG2 + 1 ) ;
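
	/*
	 * Worked example, assuming 4 KB pages and 32-bit PTEs (PAGE_SHIFT
	 * == 12, PTE_T_LOG2 == 2): shift == 1, which turns the
	 * hardware-scaled Context value (BadVPN2 << 4) into the byte
	 * offset of an even/odd pte pair (8 bytes per pair), while mask
	 * keeps the offset inside one page table page.
	 */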
	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada.  It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
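	/*
	 * Hence for the Nevada we issue the LW before reading Context,
	 * so that no memory reference sits between the mfc0 and its
	 * consumer in build_adjust_context() below.
	 */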
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
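
/*
 * Illustrative layout, relative to ebase on a 64-bit kernel: the XTLB
 * refill handler is copied to offset 0x80 and owns 32 instruction
 * slots; on overflow it branches back into the 32 slots at offset 0x0,
 * which belong to the unused 32-bit TLB refill exception.
 */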

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#elif defined(MODULE_START)
		const enum label_id ls = label_module_alloc;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL.  PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL.  Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL.  Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte.  Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
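	/*
	 * The ori/xori pair above clears the sizeof(pte_t) bit in ptr,
	 * rounding it down to the even PTE of the even/odd pair that
	 * build_update_entries() expects to load from.
	 */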
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}

void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}