/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);
/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
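/*
 * E.g. on a 32-bit kernel with 4kB pages (PAGE_SHIFT == 12) this yields
 * CKSEG0 (0x80000000) for index 0, 0x80002000 for index 1, and so on.
 * The shift is PAGE_SHIFT + 1 because each entry's VPN2 covers an
 * even/odd pair of pages, and CKSEG0 addresses are unmapped, so these
 * values can never match a translated access.
 */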
/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
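/*
 * Note that in the SMTC case ENTER_CRITICAL opens a brace which
 * EXIT_CRITICAL closes, so the two must always be used as a matched
 * pair in the same scope.  dvpe()/evpe() stop and restart the other
 * virtual processing elements so the TLB can be rewritten atomically.
 */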
#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of the dtlb;
 * unfortunately, the itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
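/*
 * FLUSH_ITLB_VM skips the flush for mappings without VM_EXEC set:
 * pages that were never executable cannot have been cached in the
 * instruction TLB.
 */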
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();
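	/* Starting at the wired count leaves the wired entries intact. */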
	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
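		/*
		 * size is now in double-page units: one TLB entry maps an
		 * even/odd pair of pages.  Flushing entry by entry only pays
		 * off while the range covers at most half the TLB; beyond
		 * that it is cheaper to drop the whole context below.
		 */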
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
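				/*
				 * tlb_probe() leaves Index negative when no
				 * entry matched; otherwise the matched slot
				 * is rewritten below with zeroed EntryLo and
				 * a unique unmapped EntryHi, invalidating it.
				 */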
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;
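			/*
			 * No ASID is ORed into EntryHi here: kernel
			 * mappings carry the global bit, so the ASID is
			 * ignored on match and the bare VPN2 suffices.
			 */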
			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
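	/*
	 * Convert the even/odd PTE pair to EntryLo format.  With 64-bit
	 * physical addresses on a 32-bit CPU the hardware bits live in
	 * pte_high; otherwise the low six bits of the PTE, which hold
	 * software-maintained flags, are shifted out first (matching what
	 * the TLB refill handler does).
	 */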
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
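	/*
	 * A negative Index from the probe above means the address was not
	 * in the TLB, so a random slot is written; otherwise the matching
	 * entry is overwritten in place so no duplicate VPN2 can exist.
	 */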
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}
#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
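	/*
	 * Bumping c0_wired first protects the new slot both from random
	 * replacement and from local_flush_tlb_all(), whose flush loop
	 * starts at the wired count.
	 */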
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
static int temp_tlb_entry __cpuinitdata;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
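	/*
	 * Temporary entries are handed out downward from the top of the
	 * TLB (temp_tlb_entry starts at tlbsize - 1 in tlb_init()) while
	 * wired entries grow upward from index 0; when the two meet the
	 * TLB is full.
	 */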
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}
static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
	 * register is not supported and we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");
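	/*
	 * Config1 bits 30:25 (MMUSize) encode the number of TLB entries
	 * minus one, hence the "+ 1" below.
	 */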
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
static int __cpuinitdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
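/*
 * "ntlb=N" on the kernel command line makes tlb_init() below wire down
 * all but N entries, restricting replacement to the last N slots, e.g.
 * to exercise TLB pressure while debugging.
 */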
void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}