/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

#include "uasm.h"

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4Kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
#ifdef MODULE_START
	label_module_alloc,
#endif
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
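
/*
 * The UASM_L_LA() invocations above expand (see uasm.h) into small
 * uasm_l_<name>() helpers that record the current output pointer in the
 * labels[] array.  Branches emitted with the uasm_il_*() variants leave a
 * reloc entry behind instead of a resolved offset; uasm_resolve_relocs()
 * patches them once all labels are known.
 */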

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
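
/*
 * Each C0_* define above is a "register, select" pair that gets spliced
 * into the mfc0/mtc0 (and dmfc0/dmtc0) emitters; C0_ENTRYHI, for example,
 * is coprocessor 0 register 10, select 0.
 */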

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
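
/*
 * GET_CONTEXT reads the register the hardware fills in with the faulting
 * VPN2: XContext for 64-bit kernels, Context for 32-bit ones.  The value
 * is turned into a pte-pair offset by build_adjust_context() further down.
 */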

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
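
/*
 * The sequence above is the classic two-level R3000 walk: BadVAddr bits
 * 31..22 select the pgd slot, the Context register supplies the pte index
 * (masked to 0xffc), and the resulting pte is written with tlbwr before
 * returning through EPC/rfe.  The interleaving is dictated by the R3000's
 * cp0 and load delay slots, as marked in the comments.
 */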

/*
 * The R4000 TLB handler is much more complicated.  We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction.  This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0 needs this, too. */
	case CPU_R4600:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };
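
/*
 * tlb_random is used by the refill handler, where tlbwr picks a random
 * entry; tlb_indexed is used by the load/store/modify fastpaths, which
 * probe for the existing entry first and rewrite it in place with tlbwi.
 */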

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_AU1000:
	case CPU_AU1100:
	case CPU_AU1500:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
	case CPU_PR4450:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BCM3302:
	case CPU_BCM4710:
	case CPU_LOONGSON2:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	uasm_il_bltz(p, r, tmp, label_module_alloc);
#else
	uasm_il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
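
/*
 * At this point PTR holds the pmd entry for the faulting address: the
 * per-CPU pgd is found through pgd_current[] (indexed via CONTEXT or
 * TCBind on SMP), the pgd slot is selected by the BadVAddr bits above
 * PGDIR_SHIFT, and one more load plus the PMD_SHIFT arithmetic yields
 * the pmd.
 */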

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;
#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	uasm_l_module_alloc(l, *p);
	/*
	 * Assumption:
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START >= 0xe000000000000000UL
	 */
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);

	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
	}
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
	} else {
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
	}

	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		uasm_i_dsll32(p, ptr, ptr, 0);	/* typical case */
	else
		UASM_i_LA(p, ptr, VMALLOC_START);
#else
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
#endif
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
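
/*
 * Example: with 4K pages and 32-bit ptes, PTE_T_LOG2 is 2 and PAGE_SHIFT
 * is 12, so shift evaluates to 4 - 3 + 12 - 12 = 1.  Context/XContext
 * presents (BadVAddr >> 13) << 4, and shifting right by 1 plus masking
 * turns that into the byte offset of an even/odd pte pair,
 * (BadVAddr >> 13) << 3, within the page table.
 */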

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada.  It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64 bit address support (36 bit on a 32 bit CPU) in a 32 bit
	 * Kernel is a special case.  Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
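
/*
 * The 6-bit right shifts above rely on the pte layout used by this
 * kernel: dropping the low software bits of a pte directly yields the
 * EntryLo format (PFN plus the hardware C, D, V and G bits), so no
 * further masking is needed before the mtc0.
 */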

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64 bit handler, we need at least one
	 * free instruction slot for the wrap-around branch.  In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (uasm_insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		uasm_l_split(&l, final_handler);
		uasm_il_b(&f, &r, label_split);
		if (uasm_insn_has_bdelay(relocs, split))
			uasm_i_nop(&f);
		else {
			uasm_copy_handler(relocs, labels, split, split + 1, f);
			uasm_move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128
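
/*
 * The three arrays below hold the synthesized fastpaths for TLB load,
 * store and modify exceptions; the generic exception dispatch outside
 * this file is assumed to enter them directly, with the slow path
 * jumping to tlb_do_page_fault_0/1 above.
 */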
u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
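
/*
 * Note: on SMP kernels iPTE_LW above emits a load-linked and iPTE_SW
 * below emits the matching store-conditional plus a branch back to
 * label_smp_pgtable_change, so the PTE software bits are updated
 * atomically with respect to other CPUs touching the same page table.
 */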
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL.  PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL.  Regardless,
 * restore PTE with the value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL.  Regardless,
 * restore PTE with the value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
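
/*
 * Common fastpath head for the R3000 handlers below: compute the PTE
 * address from BadVAddr via pgd_current and the Context register,
 * load the PTE and probe the TLB for the faulting entry.
 */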
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
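
/*
 * The R4000 fastpaths share a common head (page table walk, even PTE
 * load, optional early probe) and tail (rewrite both PTEs, indexed
 * TLB write, eret); only the access check in the middle differs.
 */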
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	/* Round ptr down to the address of the even PTE of the pair. */
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
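
/*
 * Top-level entry point.  This is assumed to be called once per CPU
 * from the TLB initialisation code; it picks the handler flavour by
 * CPU type and builds the shared fastpaths only on the first call.
 */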
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
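
/*
 * The handlers above are written as data; make sure they are visible
 * to instruction fetch before the first exception can hit them.
 */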
void __cpuinit flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}