2005-04-17 02:20:36 +04:00
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
# include <linux/config.h>
# include <linux/init.h>
# include <linux/sched.h>
# include <linux/mm.h>
# include <asm/cpu.h>
# include <asm/bootinfo.h>
# include <asm/mmu_context.h>
# include <asm/pgtable.h>
# include <asm/system.h>
extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/*
 * CP0 hazard avoidance: six nops inside .set noreorder so the core
 * cannot reorder or interlock-skip them between a CP0 write and the
 * instruction that depends on it.  The string literals were mangled
 * in this copy ("\n \t"); restored to valid asm directives here.
 */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
	"nop; nop; nop; nop; nop; nop;\n\t" \
	".set reorder\n\t")
/*
 * Invalidate every non-wired TLB entry on this CPU by rewriting each
 * slot with a unique, impossible VPN2 (see UNIQUE_ENTRYHI) and zeroed
 * EntryLo pairs.  Runs with local interrupts disabled.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Leave wired entries alone; start flushing just above them. */
	for (entry = read_c0_wired(); entry < current_cpu_data.tlbsize;
	     entry++) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
2005-04-02 14:21:56 +04:00
/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid: stale translations then miss
 * on the ASID compare and are never reused.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	/* Only bother if this mm has ever been live on this CPU. */
	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	preempt_enable();
}
void local_flush_tlb_range ( struct vm_area_struct * vma , unsigned long start ,
unsigned long end )
{
struct mm_struct * mm = vma - > vm_mm ;
int cpu = smp_processor_id ( ) ;
if ( cpu_context ( cpu , mm ) ! = 0 ) {
unsigned long flags ;
int size ;
size = ( end - start + ( PAGE_SIZE - 1 ) ) > > PAGE_SHIFT ;
size = ( size + 1 ) > > 1 ;
2005-04-02 14:21:56 +04:00
local_irq_save ( flags ) ;
2005-04-17 02:20:36 +04:00
if ( size < = current_cpu_data . tlbsize / 2 ) {
int oldpid = read_c0_entryhi ( ) ;
int newpid = cpu_asid ( cpu , mm ) ;
start & = ( PAGE_MASK < < 1 ) ;
end + = ( ( PAGE_SIZE < < 1 ) - 1 ) ;
end & = ( PAGE_MASK < < 1 ) ;
while ( start < end ) {
int idx ;
write_c0_entryhi ( start | newpid ) ;
start + = ( PAGE_SIZE < < 1 ) ;
mtc0_tlbw_hazard ( ) ;
tlb_probe ( ) ;
BARRIER ;
idx = read_c0_index ( ) ;
write_c0_entrylo0 ( 0 ) ;
write_c0_entrylo1 ( 0 ) ;
if ( idx < 0 )
continue ;
/* Make sure all entries differ. */
2005-04-02 14:21:56 +04:00
write_c0_entryhi ( UNIQUE_ENTRYHI ( idx ) ) ;
2005-04-17 02:20:36 +04:00
mtc0_tlbw_hazard ( ) ;
tlb_write_indexed ( ) ;
}
tlbw_use_hazard ( ) ;
write_c0_entryhi ( oldpid ) ;
} else {
drop_mmu_context ( mm , cpu ) ;
}
local_irq_restore ( flags ) ;
}
}
void local_flush_tlb_kernel_range ( unsigned long start , unsigned long end )
{
unsigned long flags ;
int size ;
size = ( end - start + ( PAGE_SIZE - 1 ) ) > > PAGE_SHIFT ;
size = ( size + 1 ) > > 1 ;
2005-04-02 14:21:56 +04:00
local_irq_save ( flags ) ;
2005-04-17 02:20:36 +04:00
if ( size < = current_cpu_data . tlbsize / 2 ) {
int pid = read_c0_entryhi ( ) ;
start & = ( PAGE_MASK < < 1 ) ;
end + = ( ( PAGE_SIZE < < 1 ) - 1 ) ;
end & = ( PAGE_MASK < < 1 ) ;
while ( start < end ) {
int idx ;
write_c0_entryhi ( start ) ;
start + = ( PAGE_SIZE < < 1 ) ;
mtc0_tlbw_hazard ( ) ;
tlb_probe ( ) ;
BARRIER ;
idx = read_c0_index ( ) ;
write_c0_entrylo0 ( 0 ) ;
write_c0_entrylo1 ( 0 ) ;
if ( idx < 0 )
continue ;
/* Make sure all entries differ. */
2005-04-02 14:21:56 +04:00
write_c0_entryhi ( UNIQUE_ENTRYHI ( idx ) ) ;
2005-04-17 02:20:36 +04:00
mtc0_tlbw_hazard ( ) ;
tlb_write_indexed ( ) ;
}
tlbw_use_hazard ( ) ;
write_c0_entryhi ( pid ) ;
} else {
local_flush_tlb_all ( ) ;
}
local_irq_restore ( flags ) ;
}
void local_flush_tlb_page ( struct vm_area_struct * vma , unsigned long page )
{
int cpu = smp_processor_id ( ) ;
if ( cpu_context ( cpu , vma - > vm_mm ) ! = 0 ) {
unsigned long flags ;
int oldpid , newpid , idx ;
newpid = cpu_asid ( cpu , vma - > vm_mm ) ;
page & = ( PAGE_MASK < < 1 ) ;
local_irq_save ( flags ) ;
oldpid = read_c0_entryhi ( ) ;
write_c0_entryhi ( page | newpid ) ;
mtc0_tlbw_hazard ( ) ;
tlb_probe ( ) ;
BARRIER ;
idx = read_c0_index ( ) ;
write_c0_entrylo0 ( 0 ) ;
write_c0_entrylo1 ( 0 ) ;
if ( idx < 0 )
goto finish ;
/* Make sure all entries differ. */
2005-04-02 14:21:56 +04:00
write_c0_entryhi ( UNIQUE_ENTRYHI ( idx ) ) ;
2005-04-17 02:20:36 +04:00
mtc0_tlbw_hazard ( ) ;
tlb_write_indexed ( ) ;
tlbw_use_hazard ( ) ;
finish :
write_c0_entryhi ( oldpid ) ;
local_irq_restore ( flags ) ;
}
}
/*
* This one is only used for pages with the global bit set so we don ' t care
* much about the ASID .
*/
void local_flush_tlb_one ( unsigned long page )
{
unsigned long flags ;
int oldpid , idx ;
local_irq_save ( flags ) ;
oldpid = read_c0_entryhi ( ) ;
2005-04-02 14:21:56 +04:00
page & = ( PAGE_MASK < < 1 ) ;
2005-04-17 02:20:36 +04:00
write_c0_entryhi ( page ) ;
mtc0_tlbw_hazard ( ) ;
tlb_probe ( ) ;
BARRIER ;
idx = read_c0_index ( ) ;
write_c0_entrylo0 ( 0 ) ;
write_c0_entrylo1 ( 0 ) ;
if ( idx > = 0 ) {
/* Make sure all entries differ. */
2005-04-02 14:21:56 +04:00
write_c0_entryhi ( UNIQUE_ENTRYHI ( idx ) ) ;
2005-04-17 02:20:36 +04:00
mtc0_tlbw_hazard ( ) ;
tlb_write_indexed ( ) ;
tlbw_use_hazard ( ) ;
}
write_c0_entryhi ( oldpid ) ;
local_irq_restore ( flags ) ;
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 *
 * Refill/refresh the TLB entry for `address` from the page tables.
 * The probe is interleaved with the pgd/pud/pmd walk so the table
 * walk hides the probe latency.  A matching slot is rewritten in
 * place; otherwise a random slot is used.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);	/* VPN2 covers a page pair */
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	/* 64-bit physaddr on 32-bit r1: PFN lives in pte_high. */
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	/* Shift out the software bits to get the EntryLo format. */
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}
#if 0
/*
 * NOTE(review): dead code, compiled out with #if 0 — kept only as a
 * reference for the R4k "end of page" hwbug variant mentioned above.
 * Consider deleting outright.  Note it predates the pud level: it
 * walks pgd -> pmd directly.
 */
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
}
#endif
/*
 * Install a permanent (wired) TLB entry with the given EntryLo pair,
 * EntryHi and PageMask.  Bumps the wired count so flushes leave the
 * new entry alone, then flushes everything else.
 */
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);	/* claim the next wired slot */
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
/*
* Used for loading TLB entries before trap_init ( ) has started , when we
* don ' t actually want to add a wired entry which remains throughout the
* lifetime of the system
*/
static int temp_tlb_entry __initdata ;
__init int add_temporary_entry ( unsigned long entrylo0 , unsigned long entrylo1 ,
unsigned long entryhi , unsigned long pagemask )
{
int ret = 0 ;
unsigned long flags ;
unsigned long wired ;
unsigned long old_pagemask ;
unsigned long old_ctx ;
local_irq_save ( flags ) ;
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi ( ) ;
old_pagemask = read_c0_pagemask ( ) ;
wired = read_c0_wired ( ) ;
if ( - - temp_tlb_entry < wired ) {
2005-02-02 02:02:12 +03:00
printk ( KERN_WARNING
" No TLB space left for add_temporary_entry \n " ) ;
2005-04-17 02:20:36 +04:00
ret = - ENOSPC ;
goto out ;
}
write_c0_index ( temp_tlb_entry ) ;
write_c0_pagemask ( pagemask ) ;
write_c0_entryhi ( entryhi ) ;
write_c0_entrylo0 ( entrylo0 ) ;
write_c0_entrylo1 ( entrylo1 ) ;
mtc0_tlbw_hazard ( ) ;
tlb_write_indexed ( ) ;
tlbw_use_hazard ( ) ;
write_c0_entryhi ( old_ctx ) ;
write_c0_pagemask ( old_pagemask ) ;
out :
local_irq_restore ( flags ) ;
return ret ;
}
/*
 * Read the TLB size out of the Config1 register on MIPS32/MIPS64
 * compliant CPUs.  Panics when the MMU-type field says no TLB.
 */
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32/MIPS64 compliant CPU.  Config 1 register
	 * is not supported, we assume R4k style.  Cpu probing already
	 * figured out the number of tlb entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	/* Config1[30:25] (MMUSize) holds entries - 1. */
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
/*
 * Early TLB bring-up: probe the size, reset PageMask/Wired/FrameMask,
 * reserve the temporary-entry area, wipe every entry and install the
 * TLB refill exception handler.
 */
void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);

	/* Temporary entries are allocated from the top slot downwards. */
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
	build_tlb_refill_handler();
}