/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
# include <asm/cacheflush.h>
# include <linux/cache.h>
# include <asm/cpuinfo.h>
2009-12-10 13:43:57 +03:00
# include <asm/pvr.h>
2009-03-27 16:25:16 +03:00
2009-12-10 13:43:57 +03:00
/* Enable the instruction cache by setting MSR_ICE with the msrset insn. */
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}
/* Disable the instruction cache by clearing MSR_ICE with the msrclr insn. */
static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}
/* Enable the data cache by setting MSR_DCE with the msrset insn. */
static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
2009-12-10 13:43:57 +03:00
/* Disable the data cache by clearing MSR_DCE with the msrclr insn. */
static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
/*
 * Enable the instruction cache on cores without msrset/msrclr:
 * read-modify-write of rmsr through scratch register r12.
 */
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}
2009-12-10 13:43:57 +03:00
/*
 * Disable the instruction cache on cores without msrset/msrclr:
 * clear MSR_ICE via read-modify-write of rmsr through r12.
 */
static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}
2009-12-10 13:43:57 +03:00
/*
 * Enable the data cache on cores without msrset/msrclr:
 * set MSR_DCE via read-modify-write of rmsr through r12.
 */
static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
2009-12-10 13:43:57 +03:00
/*
 * Disable the data cache on cores without msrset/msrclr:
 * clear MSR_DCE via read-modify-write of rmsr through r12.
 */
static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
2009-12-10 13:43:57 +03:00
2010-04-26 10:54:13 +04:00
/* Helper macro for computing the limits of cache range loops
 *
 * End address can be unaligned which is OK for C implementation.
 * ASM implementation align it in ASM macros
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	/* no point covering more than one full cache of lines */	\
	end = min(start + cache_size, end);				\
	start &= align;	/* round start down to a cache-line boundary */	\
} while (0)
2009-12-10 13:43:57 +03:00
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 *
 * Counts down from (cache_size - line_length) to 0 in line_length
 * steps, using the delay-slot branch (bgtid) so the decrement happens
 * on every iteration including the last.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);	/* line_length must be positive */	\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;"		\
					"bgtid	%0, 1b;"		\
					"addk	%0, %0, %1;"		\
					: : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
2009-12-10 13:43:57 +03:00
2010-04-26 10:54:13 +04:00
/* Used for wdc.flush/clear which can use rB for offset which is not possible
 * to use for simple wdc or wic.
 *
 * start address is cache aligned
 * end address is not aligned, if end is aligned then I have to subtract
 * cacheline length because I can't flush/invalidate the next cacheline.
 * If is not, I align it because I will flush/invalidate whole line.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	count = end - start;						\
	WARN_ON(count < 0);	/* range collapsed below start */	\
									\
	/* op takes start as rA, the descending offset count as rB */	\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"		\
					"bgtid	%1, 1b;"		\
					"addk	%1, %1, %2;"		\
					: : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
2009-12-10 13:43:57 +03:00
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	unsigned int volatile temp = 0;					\
	unsigned int align = ~(line_length - 1);			\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	WARN_ON(end < start);	/* range collapsed below start */	\
									\
	/* walk start upward by line_length while start <= end */	\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
					"cmpu	%0, %1, %2;"		\
					"bgtid	%0, 1b;"		\
					"addk	%1, %1, %3;"		\
				: : "r" (temp), "r" (start), "r" (end),	\
					"r" (line_length) : "memory");	\
} while (0)
2009-12-10 13:43:57 +03:00
2010-02-15 18:41:40 +03:00
/* Use the asm loop macros above instead of the C fallback loops. */
#define ASM_LOOP

/*
 * Flush an icache range with the icache turned off via MSR
 * (msrset/msrclr) and interrupts disabled for the duration.
 */
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
2009-12-10 13:43:57 +03:00
/*
 * Flush an icache range with the icache turned off via rmsr
 * read-modify-write (no msrset/msrclr) and interrupts disabled.
 */
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
2009-12-10 13:43:57 +03:00
/*
 * Flush an icache range without disabling the cache or interrupts
 * (for CPU versions where wic is safe with the cache enabled).
 */
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}
/*
 * Flush the whole icache with the icache turned off via MSR
 * instructions and interrupts disabled.
 */
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
/*
 * Flush the whole icache with the icache turned off via rmsr
 * read-modify-write and interrupts disabled.
 */
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
2009-12-10 13:43:57 +03:00
/* Flush the whole icache without disabling the cache or interrupts. */
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
}
2009-12-10 13:43:57 +03:00
/*
 * Invalidate the whole dcache (write-through model) with the dcache
 * turned off via MSR instructions and interrupts disabled.
 */
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
2009-12-10 13:43:57 +03:00
/*
 * Invalidate the whole dcache (write-through model) with the dcache
 * turned off via rmsr read-modify-write and interrupts disabled.
 */
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
2009-12-10 13:43:57 +03:00
/*
 * Invalidate the whole write-through dcache without disabling the
 * cache or interrupts (newer CPU versions).
 */
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
2012-12-27 13:40:38 +04:00
/*
 * FIXME It is blindly invalidation as is expected
 * but can't be called on noMMU in microblaze_cache_init below
 *
 * MS: noMMU kernel won't boot if simple wdc is used
 * The reason should be that there are discarded data which kernel needs
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	/* plain wdc here, not wdc.clear - see FIXME above */
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
/*
 * Invalidate a dcache range on a write-back cache using wdc.clear
 * (discards dirty data without writing it back).
 */
static void __invalidate_dcache_range_wb(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;" \
				: : "r" (i));
#endif
}
/*
 * Invalidate a write-through dcache range without disabling the cache
 * or interrupts.
 */
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
2009-12-10 13:43:57 +03:00
/*
 * Invalidate a write-through dcache range with the dcache turned off
 * via MSR instructions and interrupts disabled.
 */
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
/*
 * Invalidate a dcache range with the dcache turned off via rmsr
 * read-modify-write and interrupts disabled.
 */
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
/* Flush (write back) the whole write-back dcache using wdc.flush. */
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;" \
				: : "r" (i));
#endif
}
2009-12-10 13:43:57 +03:00
/* Flush (write back) a dcache range on a write-back cache via wdc.flush. */
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;" \
				: : "r" (i));
#endif
}
/* struct for wb caches and for wt caches */
struct scache * mbc ;
/* new wb cache model: write-back dcache, MSR instructions available */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* There is only difference in ie, id, de, dd functions */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* Old wt cache model with disabling irq and turn off cache */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};
2011-02-07 14:21:42 +03:00
/* Old wt cache model, no MSR instructions: irq-off + cache-off variants */
static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};
/* New wt cache model for newer Microblaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
2011-02-07 14:21:42 +03:00
/* New wt cache model, no MSR instructions, no irq-off needed */
static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
# define CPUVER_7_20_A 0x0c
# define CPUVER_7_20_D 0x0f
void microblaze_cache_init ( void )
{
if ( cpuinfo . use_instr & PVR2_USE_MSR_INSTR ) {
if ( cpuinfo . dcache_wb ) {
2012-12-27 13:40:38 +04:00
pr_info ( " wb_msr \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wb_msr ;
2010-05-31 23:16:30 +04:00
if ( cpuinfo . ver_code < = CPUVER_7_20_D ) {
2009-12-10 13:43:57 +03:00
/* MS: problem with signal handling - hw bug */
2012-12-27 13:40:38 +04:00
pr_info ( " WB won't work properly \n " ) ;
2009-12-10 13:43:57 +03:00
}
} else {
if ( cpuinfo . ver_code > = CPUVER_7_20_A ) {
2012-12-27 13:40:38 +04:00
pr_info ( " wt_msr_noirq \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wt_msr_noirq ;
} else {
2012-12-27 13:40:38 +04:00
pr_info ( " wt_msr \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wt_msr ;
}
}
} else {
if ( cpuinfo . dcache_wb ) {
2012-12-27 13:40:38 +04:00
pr_info ( " wb_nomsr \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wb_nomsr ;
2010-05-31 23:16:30 +04:00
if ( cpuinfo . ver_code < = CPUVER_7_20_D ) {
2009-12-10 13:43:57 +03:00
/* MS: problem with signal handling - hw bug */
2012-12-27 13:40:38 +04:00
pr_info ( " WB won't work properly \n " ) ;
2009-12-10 13:43:57 +03:00
}
} else {
if ( cpuinfo . ver_code > = CPUVER_7_20_A ) {
2012-12-27 13:40:38 +04:00
pr_info ( " wt_nomsr_noirq \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wt_nomsr_noirq ;
} else {
2012-12-27 13:40:38 +04:00
pr_info ( " wt_nomsr \n " ) ;
2009-12-10 13:43:57 +03:00
mbc = ( struct scache * ) & wt_nomsr ;
}
}
}
2012-12-27 13:40:38 +04:00
/*
* FIXME Invalidation is done in U - BOOT
* WT cache : Data is already written to main memory
* WB cache : Discard data on noMMU which caused that kernel doesn ' t boot
*/
2010-04-26 10:54:13 +04:00
/* invalidate_dcache(); */
2010-01-12 16:51:04 +03:00
enable_dcache ( ) ;
invalidate_icache ( ) ;
enable_icache ( ) ;
2009-03-27 16:25:16 +03:00
}