/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
2007-11-03 04:01:37 +03:00
# include <linux/hardirq.h>
2005-04-17 02:20:36 +04:00
# include <linux/init.h>
2007-09-27 21:26:43 +04:00
# include <linux/highmem.h>
2005-04-17 02:20:36 +04:00
# include <linux/kernel.h>
2007-10-12 02:46:05 +04:00
# include <linux/linkage.h>
2005-04-17 02:20:36 +04:00
# include <linux/sched.h>
2009-06-19 17:05:26 +04:00
# include <linux/smp.h>
2005-04-17 02:20:36 +04:00
# include <linux/mm.h>
2007-09-19 03:58:24 +04:00
# include <linux/module.h>
2005-04-17 02:20:36 +04:00
# include <linux/bitops.h>
# include <asm/bcache.h>
# include <asm/bootinfo.h>
2005-07-13 15:48:45 +04:00
# include <asm/cache.h>
2005-04-17 02:20:36 +04:00
# include <asm/cacheops.h>
# include <asm/cpu.h>
# include <asm/cpu-features.h>
# include <asm/io.h>
# include <asm/page.h>
# include <asm/pgtable.h>
# include <asm/r4kcache.h>
2007-07-28 15:45:47 +04:00
# include <asm/sections.h>
2005-04-17 02:20:36 +04:00
# include <asm/mmu_context.h>
# include <asm/war.h>
2005-04-25 20:36:23 +04:00
# include <asm/cacheflush.h> /* for run_uncached() */
2012-05-15 11:04:49 +04:00
# include <asm/traps.h>
2013-03-25 22:47:29 +04:00
# include <asm/dma-coherence.h>
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func)(void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	/*
	 * MT SMP / SMTC cores share their primary caches, so only the
	 * local invocation below is needed in those configurations.
	 */
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}
/*
 * Indexed cache operations are unsafe on CMP systems because another
 * core may be operating on the same cache concurrently.
 */
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
2005-04-17 02:20:36 +04:00
/*
* Dummy cache handling routines for machines without boardcaches
*/
2006-06-20 21:06:52 +04:00
static void cache_noop ( void ) { }
2005-04-17 02:20:36 +04:00
static struct bcache_ops no_sc_ops = {
2006-06-20 21:06:52 +04:00
. bc_enable = ( void * ) cache_noop ,
. bc_disable = ( void * ) cache_noop ,
. bc_wback_inv = ( void * ) cache_noop ,
. bc_inv = ( void * ) cache_noop
2005-04-17 02:20:36 +04:00
} ;
struct bcache_ops * bcops = & no_sc_ops ;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * Workaround for R4600 V1.x/V2.x hit cacheop errata: V2.x needs an
 * uncached load before the op, V1.x needs pipeline-settling nops.
 */
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
static void ( * r4k_blast_dcache_page ) ( unsigned long addr ) ;
static inline void r4k_blast_dcache_page_dc32 ( unsigned long addr )
{
R4600_HIT_CACHEOP_WAR_IMPL ;
blast_dcache32_page ( addr ) ;
}
2009-04-24 04:36:53 +04:00
static inline void r4k_blast_dcache_page_dc64 ( unsigned long addr )
{
R4600_HIT_CACHEOP_WAR_IMPL ;
blast_dcache64_page ( addr ) ;
}
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_dcache_page_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long dc_lsize = cpu_dcache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( dc_lsize = = 0 )
r4k_blast_dcache_page = ( void * ) cache_noop ;
else if ( dc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_dcache_page = blast_dcache16_page ;
else if ( dc_lsize = = 32 )
r4k_blast_dcache_page = r4k_blast_dcache_page_dc32 ;
2009-04-24 04:36:53 +04:00
else if ( dc_lsize = = 64 )
r4k_blast_dcache_page = r4k_blast_dcache_page_dc64 ;
2005-04-17 02:20:36 +04:00
}
static void ( * r4k_blast_dcache_page_indexed ) ( unsigned long addr ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_dcache_page_indexed_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long dc_lsize = cpu_dcache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( dc_lsize = = 0 )
r4k_blast_dcache_page_indexed = ( void * ) cache_noop ;
else if ( dc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed ;
else if ( dc_lsize = = 32 )
r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed ;
2009-04-24 04:36:53 +04:00
else if ( dc_lsize = = 64 )
r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed ;
2005-04-17 02:20:36 +04:00
}
2012-11-22 06:34:10 +04:00
void ( * r4k_blast_dcache ) ( void ) ;
EXPORT_SYMBOL ( r4k_blast_dcache ) ;
2005-04-17 02:20:36 +04:00
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_dcache_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long dc_lsize = cpu_dcache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( dc_lsize = = 0 )
r4k_blast_dcache = ( void * ) cache_noop ;
else if ( dc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_dcache = blast_dcache16 ;
else if ( dc_lsize = = 32 )
r4k_blast_dcache = blast_dcache32 ;
2009-04-24 04:36:53 +04:00
else if ( dc_lsize = = 64 )
r4k_blast_dcache = blast_dcache64 ;
2005-04-17 02:20:36 +04:00
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)

#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
2005-04-17 02:20:36 +04:00
static inline void blast_r4600_v1_icache32 ( void )
{
unsigned long flags ;
local_irq_save ( flags ) ;
blast_icache32 ( ) ;
local_irq_restore ( flags ) ;
}
static inline void tx49_blast_icache32 ( void )
{
unsigned long start = INDEX_BASE ;
unsigned long end = start + current_cpu_data . icache . waysize ;
unsigned long ws_inc = 1UL < < current_cpu_data . icache . waybit ;
unsigned long ws_end = current_cpu_data . icache . ways < <
2013-01-22 15:59:30 +04:00
current_cpu_data . icache . waybit ;
2005-04-17 02:20:36 +04:00
unsigned long ws , addr ;
CACHE32_UNROLL32_ALIGN2 ;
/* I'm in even chunk. blast odd chunks */
2005-09-04 02:56:17 +04:00
for ( ws = 0 ; ws < ws_end ; ws + = ws_inc )
for ( addr = start + 0x400 ; addr < end ; addr + = 0x400 * 2 )
2007-10-12 02:46:15 +04:00
cache32_unroll32 ( addr | ws , Index_Invalidate_I ) ;
2005-04-17 02:20:36 +04:00
CACHE32_UNROLL32_ALIGN ;
/* I'm in odd chunk. blast even chunks */
2005-09-04 02:56:17 +04:00
for ( ws = 0 ; ws < ws_end ; ws + = ws_inc )
for ( addr = start ; addr < end ; addr + = 0x400 * 2 )
2007-10-12 02:46:15 +04:00
cache32_unroll32 ( addr | ws , Index_Invalidate_I ) ;
2005-04-17 02:20:36 +04:00
}
/* R4600 V1.x variant: run the indexed page blast with interrupts disabled. */
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed ( unsigned long page )
{
2006-04-04 12:34:14 +04:00
unsigned long indexmask = current_cpu_data . icache . waysize - 1 ;
unsigned long start = INDEX_BASE + ( page & indexmask ) ;
2005-04-17 02:20:36 +04:00
unsigned long end = start + PAGE_SIZE ;
unsigned long ws_inc = 1UL < < current_cpu_data . icache . waybit ;
unsigned long ws_end = current_cpu_data . icache . ways < <
2013-01-22 15:59:30 +04:00
current_cpu_data . icache . waybit ;
2005-04-17 02:20:36 +04:00
unsigned long ws , addr ;
CACHE32_UNROLL32_ALIGN2 ;
/* I'm in even chunk. blast odd chunks */
2005-09-04 02:56:17 +04:00
for ( ws = 0 ; ws < ws_end ; ws + = ws_inc )
for ( addr = start + 0x400 ; addr < end ; addr + = 0x400 * 2 )
2007-10-12 02:46:15 +04:00
cache32_unroll32 ( addr | ws , Index_Invalidate_I ) ;
2005-04-17 02:20:36 +04:00
CACHE32_UNROLL32_ALIGN ;
/* I'm in odd chunk. blast even chunks */
2005-09-04 02:56:17 +04:00
for ( ws = 0 ; ws < ws_end ; ws + = ws_inc )
for ( addr = start ; addr < end ; addr + = 0x400 * 2 )
2007-10-12 02:46:15 +04:00
cache32_unroll32 ( addr | ws , Index_Invalidate_I ) ;
2005-04-17 02:20:36 +04:00
}
static void ( * r4k_blast_icache_page ) ( unsigned long addr ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_icache_page_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long ic_lsize = cpu_icache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( ic_lsize = = 0 )
r4k_blast_icache_page = ( void * ) cache_noop ;
else if ( ic_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_icache_page = blast_icache16_page ;
else if ( ic_lsize = = 32 )
r4k_blast_icache_page = blast_icache32_page ;
else if ( ic_lsize = = 64 )
r4k_blast_icache_page = blast_icache64_page ;
}
static void ( * r4k_blast_icache_page_indexed ) ( unsigned long addr ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_icache_page_indexed_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long ic_lsize = cpu_icache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( ic_lsize = = 0 )
r4k_blast_icache_page_indexed = ( void * ) cache_noop ;
else if ( ic_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_icache_page_indexed = blast_icache16_page_indexed ;
else if ( ic_lsize = = 32 ) {
2005-09-09 23:45:41 +04:00
if ( R4600_V1_INDEX_ICACHEOP_WAR & & cpu_is_r4600_v1_x ( ) )
2005-04-17 02:20:36 +04:00
r4k_blast_icache_page_indexed =
blast_icache32_r4600_v1_page_indexed ;
2005-09-09 23:45:41 +04:00
else if ( TX49XX_ICACHE_INDEX_INV_WAR )
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed ;
2005-04-17 02:20:36 +04:00
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed ;
} else if ( ic_lsize = = 64 )
r4k_blast_icache_page_indexed = blast_icache64_page_indexed ;
}
2012-11-22 06:34:10 +04:00
void ( * r4k_blast_icache ) ( void ) ;
EXPORT_SYMBOL ( r4k_blast_icache ) ;
2005-04-17 02:20:36 +04:00
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_icache_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long ic_lsize = cpu_icache_line_size ( ) ;
2006-06-20 21:06:52 +04:00
if ( ic_lsize = = 0 )
r4k_blast_icache = ( void * ) cache_noop ;
else if ( ic_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_icache = blast_icache16 ;
else if ( ic_lsize = = 32 ) {
if ( R4600_V1_INDEX_ICACHEOP_WAR & & cpu_is_r4600_v1_x ( ) )
r4k_blast_icache = blast_r4600_v1_icache32 ;
else if ( TX49XX_ICACHE_INDEX_INV_WAR )
r4k_blast_icache = tx49_blast_icache32 ;
else
r4k_blast_icache = blast_icache32 ;
} else if ( ic_lsize = = 64 )
r4k_blast_icache = blast_icache64 ;
}
static void ( * r4k_blast_scache_page ) ( unsigned long addr ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_scache_page_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long sc_lsize = cpu_scache_line_size ( ) ;
2006-02-27 22:05:55 +03:00
if ( scache_size = = 0 )
2006-06-20 21:06:52 +04:00
r4k_blast_scache_page = ( void * ) cache_noop ;
2006-02-27 22:05:55 +03:00
else if ( sc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_scache_page = blast_scache16_page ;
else if ( sc_lsize = = 32 )
r4k_blast_scache_page = blast_scache32_page ;
else if ( sc_lsize = = 64 )
r4k_blast_scache_page = blast_scache64_page ;
else if ( sc_lsize = = 128 )
r4k_blast_scache_page = blast_scache128_page ;
}
static void ( * r4k_blast_scache_page_indexed ) ( unsigned long addr ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_scache_page_indexed_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long sc_lsize = cpu_scache_line_size ( ) ;
2006-02-27 22:05:55 +03:00
if ( scache_size = = 0 )
2006-06-20 21:06:52 +04:00
r4k_blast_scache_page_indexed = ( void * ) cache_noop ;
2006-02-27 22:05:55 +03:00
else if ( sc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_scache_page_indexed = blast_scache16_page_indexed ;
else if ( sc_lsize = = 32 )
r4k_blast_scache_page_indexed = blast_scache32_page_indexed ;
else if ( sc_lsize = = 64 )
r4k_blast_scache_page_indexed = blast_scache64_page_indexed ;
else if ( sc_lsize = = 128 )
r4k_blast_scache_page_indexed = blast_scache128_page_indexed ;
}
static void ( * r4k_blast_scache ) ( void ) ;
2008-03-08 12:56:28 +03:00
static void __cpuinit r4k_blast_scache_setup ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long sc_lsize = cpu_scache_line_size ( ) ;
2006-02-27 22:05:55 +03:00
if ( scache_size = = 0 )
2006-06-20 21:06:52 +04:00
r4k_blast_scache = ( void * ) cache_noop ;
2006-02-27 22:05:55 +03:00
else if ( sc_lsize = = 16 )
2005-04-17 02:20:36 +04:00
r4k_blast_scache = blast_scache16 ;
else if ( sc_lsize = = 32 )
r4k_blast_scache = blast_scache32 ;
else if ( sc_lsize = = 64 )
r4k_blast_scache = blast_scache64 ;
else if ( sc_lsize = = 128 )
r4k_blast_scache = blast_scache128 ;
}
static inline void local_r4k___flush_cache_all ( void * args )
{
2007-06-06 10:52:43 +04:00
# if defined(CONFIG_CPU_LOONGSON2)
r4k_blast_scache ( ) ;
return ;
# endif
2005-04-17 02:20:36 +04:00
r4k_blast_dcache ( ) ;
r4k_blast_icache ( ) ;
2007-10-12 02:46:15 +04:00
switch ( current_cpu_type ( ) ) {
2005-04-17 02:20:36 +04:00
case CPU_R4000SC :
case CPU_R4000MC :
case CPU_R4400SC :
case CPU_R4400MC :
case CPU_R10000 :
case CPU_R12000 :
2006-05-17 06:23:59 +04:00
case CPU_R14000 :
2005-04-17 02:20:36 +04:00
r4k_blast_scache ( ) ;
}
}
static void r4k___flush_cache_all ( void )
{
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k___flush_cache_all , NULL ) ;
2005-04-17 02:20:36 +04:00
}
/*
 * Does @mm hold a live ASID on any CPU that could have cached its pages?
 * On MT SMP/SMTC every online CPU must be checked; otherwise only the
 * local CPU's context matters.
 */
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int cpu;

	for_each_online_cpu(cpu)
		if (cpu_context(cpu, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
/* vmap/vunmap only need the primary D-cache written back/invalidated. */
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
2005-04-17 02:20:36 +04:00
static inline void local_r4k_flush_cache_range ( void * args )
{
struct vm_area_struct * vma = args ;
2008-02-11 17:51:40 +03:00
int exec = vma - > vm_flags & VM_EXEC ;
2005-04-17 02:20:36 +04:00
2007-10-08 19:38:37 +04:00
if ( ! ( has_valid_asid ( vma - > vm_mm ) ) )
2005-04-17 02:20:36 +04:00
return ;
2006-08-22 16:15:47 +04:00
r4k_blast_dcache ( ) ;
2008-02-11 17:51:40 +03:00
if ( exec )
r4k_blast_icache ( ) ;
2005-04-17 02:20:36 +04:00
}
static void r4k_flush_cache_range ( struct vm_area_struct * vma ,
unsigned long start , unsigned long end )
{
2008-02-11 17:51:40 +03:00
int exec = vma - > vm_flags & VM_EXEC ;
2006-08-22 16:15:47 +04:00
2008-02-11 17:51:40 +03:00
if ( cpu_has_dc_aliases | | ( exec & & ! cpu_has_ic_fills_f_dc ) )
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k_flush_cache_range , vma ) ;
2005-04-17 02:20:36 +04:00
}
static inline void local_r4k_flush_cache_mm ( void * args )
{
struct mm_struct * mm = args ;
2007-10-08 19:38:37 +04:00
if ( ! has_valid_asid ( mm ) )
2005-04-17 02:20:36 +04:00
return ;
/*
* Kludge alert . For obscure reasons R4000SC and R4400SC go nuts if we
* only flush the primary caches but R10000 and R12000 behave sane . . .
2006-11-30 04:14:48 +03:00
* R4000SC and R4400SC indexed S - cache ops also invalidate primary
* caches , so we can bail out early .
2005-04-17 02:20:36 +04:00
*/
2007-10-12 02:46:15 +04:00
if ( current_cpu_type ( ) = = CPU_R4000SC | |
current_cpu_type ( ) = = CPU_R4000MC | |
current_cpu_type ( ) = = CPU_R4400SC | |
current_cpu_type ( ) = = CPU_R4400MC ) {
2005-04-17 02:20:36 +04:00
r4k_blast_scache ( ) ;
2006-11-30 04:14:48 +03:00
return ;
}
r4k_blast_dcache ( ) ;
2005-04-17 02:20:36 +04:00
}
static void r4k_flush_cache_mm ( struct mm_struct * mm )
{
if ( ! cpu_has_dc_aliases )
return ;
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k_flush_cache_mm , mm ) ;
2005-04-17 02:20:36 +04:00
}
/* Argument bundle for the per-CPU flush_cache_page IPI handler. */
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;	/* user virtual address, page aligned below */
	unsigned long pfn;	/* physical frame backing addr */
};
static inline void local_r4k_flush_cache_page ( void * args )
{
struct flush_cache_page_args * fcp_args = args ;
struct vm_area_struct * vma = fcp_args - > vma ;
2005-10-12 03:02:34 +04:00
unsigned long addr = fcp_args - > addr ;
2007-09-27 21:26:43 +04:00
struct page * page = pfn_to_page ( fcp_args - > pfn ) ;
2005-04-17 02:20:36 +04:00
int exec = vma - > vm_flags & VM_EXEC ;
struct mm_struct * mm = vma - > vm_mm ;
2008-06-15 01:22:08 +04:00
int map_coherent = 0 ;
2005-04-17 02:20:36 +04:00
pgd_t * pgdp ;
2005-02-10 15:19:59 +03:00
pud_t * pudp ;
2005-04-17 02:20:36 +04:00
pmd_t * pmdp ;
pte_t * ptep ;
2007-09-27 21:26:43 +04:00
void * vaddr ;
2005-04-17 02:20:36 +04:00
2005-02-10 16:54:37 +03:00
/*
* If ownes no valid ASID yet , cannot possibly have gotten
* this page into the cache .
*/
2007-10-08 19:38:37 +04:00
if ( ! has_valid_asid ( mm ) )
2005-02-10 16:54:37 +03:00
return ;
2005-10-12 03:02:34 +04:00
addr & = PAGE_MASK ;
pgdp = pgd_offset ( mm , addr ) ;
pudp = pud_offset ( pgdp , addr ) ;
pmdp = pmd_offset ( pudp , addr ) ;
ptep = pte_offset ( pmdp , addr ) ;
2005-04-17 02:20:36 +04:00
/*
* If the page isn ' t marked valid , the page cannot possibly be
* in the cache .
*/
2008-01-29 13:14:55 +03:00
if ( ! ( pte_present ( * ptep ) ) )
2005-04-17 02:20:36 +04:00
return ;
2007-09-27 21:26:43 +04:00
if ( ( mm = = current - > active_mm ) & & ( pte_val ( * ptep ) & _PAGE_VALID ) )
vaddr = NULL ;
else {
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one .
*/
2008-06-15 01:22:08 +04:00
map_coherent = ( cpu_has_dc_aliases & &
page_mapped ( page ) & & ! Page_dcache_dirty ( page ) ) ;
if ( map_coherent )
2007-09-27 21:26:43 +04:00
vaddr = kmap_coherent ( page , addr ) ;
else
2011-11-25 19:14:15 +04:00
vaddr = kmap_atomic ( page ) ;
2007-09-27 21:26:43 +04:00
addr = ( unsigned long ) vaddr ;
2005-04-17 02:20:36 +04:00
}
if ( cpu_has_dc_aliases | | ( exec & & ! cpu_has_ic_fills_f_dc ) ) {
2007-09-27 21:26:43 +04:00
r4k_blast_dcache_page ( addr ) ;
2008-04-28 20:14:26 +04:00
if ( exec & & ! cpu_icache_snoops_remote_store )
r4k_blast_scache_page ( addr ) ;
2005-04-17 02:20:36 +04:00
}
if ( exec ) {
2007-09-27 21:26:43 +04:00
if ( vaddr & & cpu_has_vtag_icache & & mm = = current - > active_mm ) {
2005-04-17 02:20:36 +04:00
int cpu = smp_processor_id ( ) ;
2005-02-19 16:32:02 +03:00
if ( cpu_context ( cpu , mm ) ! = 0 )
drop_mmu_context ( mm , cpu ) ;
2005-04-17 02:20:36 +04:00
} else
2007-09-27 21:26:43 +04:00
r4k_blast_icache_page ( addr ) ;
}
if ( vaddr ) {
2008-06-15 01:22:08 +04:00
if ( map_coherent )
2007-09-27 21:26:43 +04:00
kunmap_coherent ( ) ;
else
2011-11-25 19:14:15 +04:00
kunmap_atomic ( vaddr ) ;
2005-04-17 02:20:36 +04:00
}
}
2005-10-12 03:02:34 +04:00
static void r4k_flush_cache_page ( struct vm_area_struct * vma ,
unsigned long addr , unsigned long pfn )
2005-04-17 02:20:36 +04:00
{
struct flush_cache_page_args args ;
args . vma = vma ;
2005-10-12 03:02:34 +04:00
args . addr = addr ;
2006-03-13 12:23:03 +03:00
args . pfn = pfn ;
2005-04-17 02:20:36 +04:00
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k_flush_cache_page , & args ) ;
2005-04-17 02:20:36 +04:00
}
static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long)addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	/* Cannot IPI from atomic context; flush locally only in that case. */
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *)addr);
}
/* Argument bundle for the per-CPU flush_icache_range IPI handler. */
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
2008-08-04 22:53:57 +04:00
static inline void local_r4k_flush_icache_range ( unsigned long start , unsigned long end )
2005-04-17 02:20:36 +04:00
{
if ( ! cpu_has_ic_fills_f_dc ) {
2006-06-20 21:06:52 +04:00
if ( end - start > = dcache_size ) {
2005-04-17 02:20:36 +04:00
r4k_blast_dcache ( ) ;
} else {
2005-09-10 00:26:54 +04:00
R4600_HIT_CACHEOP_WAR_IMPL ;
2006-02-09 18:39:06 +03:00
protected_blast_dcache_range ( start , end ) ;
2005-04-17 02:20:36 +04:00
}
}
if ( end - start > icache_size )
r4k_blast_icache ( ) ;
2006-02-09 18:39:06 +03:00
else
protected_blast_icache_range ( start , end ) ;
2005-04-17 02:20:36 +04:00
}
2008-08-04 22:53:57 +04:00
static inline void local_r4k_flush_icache_range_ipi ( void * args )
{
struct flush_icache_range_args * fir_args = args ;
unsigned long start = fir_args - > start ;
unsigned long end = fir_args - > end ;
local_r4k_flush_icache_range ( start , end ) ;
}
2006-01-28 20:27:51 +03:00
static void r4k_flush_icache_range ( unsigned long start , unsigned long end )
2005-04-17 02:20:36 +04:00
{
struct flush_icache_range_args args ;
args . start = start ;
args . end = end ;
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k_flush_icache_range_ipi , & args ) ;
2005-07-12 22:35:38 +04:00
instruction_hazard ( ) ;
2005-04-17 02:20:36 +04:00
}
#ifdef CONFIG_DMA_NONCOHERENT

/* Write back and invalidate caches covering [addr, addr + size) for DMA. */
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		/* Inclusive S-cache: flushing it covers the primaries too. */
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
	__sync();
}
static void r4k_dma_cache_inv ( unsigned long addr , unsigned long size )
{
/* Catch bad driver code */
BUG_ON ( size = = 0 ) ;
2006-07-06 16:04:01 +04:00
if ( cpu_has_inclusive_pcaches ) {
2006-02-09 18:39:06 +03:00
if ( size > = scache_size )
2005-04-17 02:20:36 +04:00
r4k_blast_scache ( ) ;
2009-01-11 21:44:49 +03:00
else {
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
* some processors , among them the RM5200 and RM7000
* QED processors will throw an address error for cache
2013-01-22 15:59:30 +04:00
* hit ops with insufficient alignment . Solved by
2009-01-11 21:44:49 +03:00
* aligning the address to cache line size .
*/
2007-11-27 01:40:01 +03:00
blast_inv_scache_range ( addr , addr + size ) ;
2009-01-11 21:44:49 +03:00
}
2010-09-07 08:03:46 +04:00
__sync ( ) ;
2005-04-17 02:20:36 +04:00
return ;
}
2008-04-28 20:14:26 +04:00
if ( cpu_has_safe_index_cacheops & & size > = dcache_size ) {
2005-04-17 02:20:36 +04:00
r4k_blast_dcache ( ) ;
} else {
R4600_HIT_CACHEOP_WAR_IMPL ;
2007-11-27 01:40:01 +03:00
blast_inv_dcache_range ( addr , addr + size ) ;
2005-04-17 02:20:36 +04:00
}
bc_inv ( addr , size ) ;
2010-09-07 08:03:46 +04:00
__sync ( ) ;
2005-04-17 02:20:36 +04:00
}
# endif /* CONFIG_DMA_NONCOHERENT */
/*
* While we ' re protected against bad userland addresses we don ' t care
* very much about what happens in that case . Usually a segmentation
* fault will dump the process later on anyway . . .
*/
static void local_r4k_flush_cache_sigtramp ( void * arg )
{
2005-09-09 23:45:41 +04:00
unsigned long ic_lsize = cpu_icache_line_size ( ) ;
unsigned long dc_lsize = cpu_dcache_line_size ( ) ;
unsigned long sc_lsize = cpu_scache_line_size ( ) ;
2005-04-17 02:20:36 +04:00
unsigned long addr = ( unsigned long ) arg ;
R4600_HIT_CACHEOP_WAR_IMPL ;
2006-06-20 21:06:52 +04:00
if ( dc_lsize )
protected_writeback_dcache_line ( addr & ~ ( dc_lsize - 1 ) ) ;
2006-02-27 22:05:55 +03:00
if ( ! cpu_icache_snoops_remote_store & & scache_size )
2005-04-17 02:20:36 +04:00
protected_writeback_scache_line ( addr & ~ ( sc_lsize - 1 ) ) ;
2006-06-20 21:06:52 +04:00
if ( ic_lsize )
protected_flush_icache_line ( addr & ~ ( ic_lsize - 1 ) ) ;
2005-04-17 02:20:36 +04:00
if ( MIPS4K_ICACHE_REFILL_WAR ) {
__asm__ __volatile__ (
" .set push \n \t "
" .set noat \n \t "
" .set mips3 \n \t "
2005-09-04 02:56:16 +04:00
# ifdef CONFIG_32BIT
2005-04-17 02:20:36 +04:00
" la $at,1f \n \t "
# endif
2005-09-04 02:56:16 +04:00
# ifdef CONFIG_64BIT
2005-04-17 02:20:36 +04:00
" dla $at,1f \n \t "
# endif
" cache %0,($at) \n \t "
" nop; nop; nop \n "
" 1: \n \t "
" .set pop "
:
: " i " ( Hit_Invalidate_I ) ) ;
}
if ( MIPS_CACHE_SYNC_WAR )
__asm__ __volatile__ ( " sync " ) ;
}
static void r4k_flush_cache_sigtramp ( unsigned long addr )
{
2010-10-29 22:08:25 +04:00
r4k_on_each_cpu ( local_r4k_flush_cache_sigtramp , ( void * ) addr ) ;
2005-04-17 02:20:36 +04:00
}
/* Only virtually-tagged I-caches can hold stale entries needing a full blast. */
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
/* Argument bundle for the per-CPU kernel-vmap flush IPI handler. */
struct flush_kernel_vmap_range_args {
	unsigned long vaddr;
	int size;	/* NOTE(review): int, compared against unsigned dcache_size below */
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	/* vaddr is already unsigned long; the original cast was redundant. */
	args.vaddr = vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}
2005-04-17 02:20:36 +04:00
static inline void rm7k_erratum31 ( void )
{
const unsigned long ic_lsize = 32 ;
unsigned long addr ;
/* RM7000 erratum #31. The icache is screwed at startup. */
write_c0_taglo ( 0 ) ;
write_c0_taghi ( 0 ) ;
for ( addr = INDEX_BASE ; addr < = INDEX_BASE + 4096 ; addr + = ic_lsize ) {
__asm__ __volatile__ (
2005-09-02 13:56:12 +04:00
" .set push \n \t "
2005-04-17 02:20:36 +04:00
" .set noreorder \n \t "
" .set mips3 \n \t "
" cache \t %1, 0(%0) \n \t "
" cache \t %1, 0x1000(%0) \n \t "
" cache \t %1, 0x2000(%0) \n \t "
" cache \t %1, 0x3000(%0) \n \t "
" cache \t %2, 0(%0) \n \t "
" cache \t %2, 0x1000(%0) \n \t "
" cache \t %2, 0x2000(%0) \n \t "
" cache \t %2, 0x3000(%0) \n \t "
" cache \t %1, 0(%0) \n \t "
" cache \t %1, 0x1000(%0) \n \t "
" cache \t %1, 0x2000(%0) \n \t "
" cache \t %1, 0x3000(%0) \n \t "
2005-09-02 13:56:12 +04:00
" .set pop \n "
2005-04-17 02:20:36 +04:00
:
: " r " ( addr ) , " i " ( Index_Store_Tag_I ) , " i " ( Fill ) ) ;
}
}
2012-06-26 08:11:03 +04:00
static inline void alias_74k_erratum ( struct cpuinfo_mips * c )
{
/*
* Early versions of the 74 K do not update the cache tags on a
* vtag miss / ptag hit which can occur in the case of KSEG0 / KUSEG
* aliases . In this case it is better to treat the cache as always
* having aliases .
*/
if ( ( c - > processor_id & 0xff ) < = PRID_REV_ENCODE_332 ( 2 , 4 , 0 ) )
c - > dcache . flags | = MIPS_CACHE_VTAG ;
if ( ( c - > processor_id & 0xff ) = = PRID_REV_ENCODE_332 ( 2 , 4 , 0 ) )
write_c0_config6 ( read_c0_config6 ( ) | MIPS_CONF6_SYND ) ;
if ( ( ( c - > processor_id & 0xff00 ) = = PRID_IMP_1074K ) & &
( ( c - > processor_id & 0xff ) < = PRID_REV_ENCODE_332 ( 1 , 1 , 0 ) ) ) {
c - > dcache . flags | = MIPS_CACHE_VTAG ;
write_c0_config6 ( read_c0_config6 ( ) | MIPS_CONF6_SYND ) ;
}
}
2008-03-08 12:56:28 +03:00
static char * way_string [ ] __cpuinitdata = { NULL , " direct mapped " , " 2-way " ,
2005-04-17 02:20:36 +04:00
" 3-way " , " 4-way " , " 5-way " , " 6-way " , " 7-way " , " 8-way "
} ;
2008-03-08 12:56:28 +03:00
/*
 * Probe the geometry of the primary I- and D-caches (size, line size,
 * associativity, way bit) into current_cpu_data and the icache_size /
 * dcache_size globals.  Legacy R4000-class cores encode the geometry in
 * the classic config register; MIPS32/MIPS64 cores are decoded from
 * Config1.  Afterwards the aliasing / virtual-tag flags needed by the
 * flush routines are derived.
 */
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		/* waybit selects the way within the index for 2-way caches */
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/* Fixed line sizes; only the size is encoded in config. */
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through - shares the VR4131 cache layout */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		/* Low PRID bits distinguish the 4-way from the 2-way parts. */
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		/* Config1 encodes linesz as 2<<field, 0 meaning no cache */
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.	With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	/* linesz == 0 means the cache is absent; avoid dividing by zero */
	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so normally would suffer from aliases.  So
	 * normally they'd suffer from aliases but magic in the hardware deals
	 * with that for us so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		/* physically indexed dcache: no virtual aliases possible */
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/* hardware handles aliasing, see block comment above */
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (c->cputype == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
		/* fall through - treat like any virtually indexed cache */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips doesn't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
/*
* If you even _breathe_ on this function , look at the gcc output and make sure
* it does not pop things on and off the stack for the cache sizing loop that
* executes in KSEG1 space or else you will crash and burn badly . You have
* been warned .
*/
2008-03-08 12:56:28 +03:00
static int __cpuinit probe_scache ( void )
2005-04-17 02:20:36 +04:00
{
unsigned long flags , addr , begin , end , pow2 ;
unsigned int config = read_c0_config ( ) ;
struct cpuinfo_mips * c = & current_cpu_data ;
if ( config & CONF_SC )
return 0 ;
2007-07-28 15:45:47 +04:00
begin = ( unsigned long ) & _stext ;
2005-04-17 02:20:36 +04:00
begin & = ~ ( ( 4 * 1024 * 1024 ) - 1 ) ;
end = begin + ( 4 * 1024 * 1024 ) ;
/*
* This is such a bitch , you ' d think they would make it easy to do
* this . Away you daemons of stupidity !
*/
local_irq_save ( flags ) ;
/* Fill each size-multiple cache line with a valid tag. */
pow2 = ( 64 * 1024 ) ;
for ( addr = begin ; addr < end ; addr = ( begin + pow2 ) ) {
unsigned long * p = ( unsigned long * ) addr ;
__asm__ __volatile__ ( " nop " : : " r " ( * p ) ) ; /* whee... */
pow2 < < = 1 ;
}
/* Load first line with zero (therefore invalid) tag. */
write_c0_taglo ( 0 ) ;
write_c0_taghi ( 0 ) ;
__asm__ __volatile__ ( " nop; nop; nop; nop; " ) ; /* avoid the hazard */
cache_op ( Index_Store_Tag_I , begin ) ;
cache_op ( Index_Store_Tag_D , begin ) ;
cache_op ( Index_Store_Tag_SD , begin ) ;
/* Now search for the wrap around point. */
pow2 = ( 128 * 1024 ) ;
for ( addr = begin + ( 128 * 1024 ) ; addr < end ; addr = begin + pow2 ) {
cache_op ( Index_Load_Tag_SD , addr ) ;
__asm__ __volatile__ ( " nop; nop; nop; nop; " ) ; /* hazard... */
if ( ! read_c0_taglo ( ) )
break ;
pow2 < < = 1 ;
}
local_irq_restore ( flags ) ;
addr - = begin ;
scache_size = addr ;
c - > scache . linesz = 16 < < ( ( config & R4K_CONF_SB ) > > 22 ) ;
c - > scache . ways = 1 ;
c - > dcache . waybit = 0 ; /* does not matter */
return 1 ;
}
2007-06-06 10:52:43 +04:00
#if defined(CONFIG_CPU_LOONGSON2)
/*
 * Loongson-2 has a fixed 512kB 4-way unified L2; no probing needed.
 *
 * Fix: annotated __cpuinit (not __init) because the only caller,
 * setup_scache(), is __cpuinit - the previous __init annotation was a
 * section mismatch that could call freed init text on CPU bring-up
 * after boot.
 */
static void __cpuinit loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
2005-04-17 02:20:36 +04:00
extern int r5k_sc_init ( void ) ;
extern int rm7k_sc_init ( void ) ;
2006-06-20 20:15:20 +04:00
extern int mips_sc_init ( void ) ;
2005-04-17 02:20:36 +04:00
2008-03-08 12:56:28 +03:00
/*
 * Detect and configure the secondary (L2) cache for the current CPU:
 * either by hardware probing (R4000SC class), by decoding the config
 * register (R10000 class), or by delegating to a CPU-family-specific
 * init routine.  Fills in c->scache and scache_size.
 */
static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/* probe_scache() must execute uncached - see its warning */
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/* L2 geometry is encoded directly in the config register */
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init ()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
2006-05-26 19:44:54 +04:00
/*
 * c0_config.od (bit 19) was write only (and read as 0) on the early
 * revisions of Alchemy SOCs.  It disables the bus transaction
 * overlapping and needs to be set to fix various errata.
 *
 * The Au1100 errata actually keep silence about this bit, so we set it
 * just in case for those revisions that require it to be set according
 * to the (now gone) cpu table.
 */
void au1x00_fixup_config_od(void)
{
	unsigned int rev = read_c0_prid();

	if (rev == 0x00030100 ||	/* Au1000 DA */
	    rev == 0x00030201 ||	/* Au1000 HA */
	    rev == 0x00030202 ||	/* Au1000 HB */
	    rev == 0x01030200 ||	/* Au1500 AB */
	    rev == 0x02030200 ||	/* Au1100 AB */
	    rev == 0x02030201 ||	/* Au1100 BA */
	    rev == 0x02030202)		/* Au1100 BC */
		set_c0_config(1 << 19);
}
2008-06-12 20:26:02 +04:00
/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

/*
 * Program all three cache-coherency fields of c0_config (bits 2:0,
 * 27:25 and 30:28) with the kernel's default page cachability
 * attribute on NXP PR4450, followed by a CP0 hazard barrier.
 */
static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
2007-09-19 03:58:24 +04:00
/*
 * Cache coherency attribute override from the "cca=" kernel command
 * line; -1 means "not set", in which case coherency_setup() keeps the
 * attribute already programmed into c0_config.
 */
static int __cpuinitdata cca = -1;

/* Parse the "cca=" early parameter into the cca override above. */
static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
2007-09-19 03:58:24 +04:00
2008-03-08 12:56:28 +03:00
/*
 * Select and program the KSEG0 cache coherency attribute (from the
 * "cca=" parameter or the value already in c0_config) and apply
 * per-CPU coherency-related fixups.
 */
static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	/*
	 * Fix: this used to read "case PRID_IMP_PR4450:", comparing a
	 * PRID implementation constant against the cpu_type enum the
	 * switch operates on - which can never match, leaving the
	 * PR4450 fixup dead code.
	 */
	case CPU_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
2012-05-15 11:04:49 +04:00
/*
 * Install the uncached cache-error exception handler at offset 0x100:
 * SB1/SB1A cores get their dedicated vector, every other CPU the
 * generic one.
 */
static void __cpuinit r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	char *vec = &except_vec2_generic;

	switch (current_cpu_data.cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		vec = &except_vec2_sb1;
		break;
	default:
		break;
	}

	set_uncached_handler(0x100, vec, 0x80);
}
/*
 * Top-level r4k-style cache initialization: probe the primary and
 * secondary cache geometry, generate the blast routines, install the
 * r4k implementations of the generic MIPS cache-maintenance hooks and
 * program the KSEG0 coherency attribute.  Ordering matters: the full
 * flush must happen before coherency_setup() changes the CCA.
 */
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	/* Generate the geometry-specialized blast routines used below. */
	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	/* flush_cache_all is deliberately a no-op here; the real full
	   flush is __flush_cache_all below. */
	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	/* With coherent I/O no cache maintenance is needed around DMA. */
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		/* plain writeback intentionally uses the wback+inv routine */
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on core with and without coherent
	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}