// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
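/*
 * CACHE_WAY_SIZE()/CACHE_SET_SIZE() decode the L2 cache type register
 * read below ("mrc p15, 1, %0, c0, c0, 1"): bits [11:8] give the way
 * size as a power-of-two multiple of 8 KiB, and dividing the way size
 * by the 32-byte line gives the number of sets for the set/way loops.
 */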
static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}
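/* Clean (write back) a single L2 cache line by modified virtual address. */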
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}
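/* Invalidate a single L2 cache line by modified virtual address. */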
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}
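/*
 * Invalidate the whole L2 cache by walking every set/way combination:
 * the way index goes in bits [31:29] (8 ways per set) and the set index
 * starts at bit 5, matching the 32-byte line size.
 */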
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r" (set_way));
		}
	}

	dsb();
}
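/*
 * The range operations below receive physical addresses, but the cache
 * ops take virtual ones.  With CONFIG_HIGHMEM the target page may have
 * no permanent kernel mapping, so these helpers borrow an atomic kmap
 * one page at a time; a 'va' of -1 means no mapping is currently held.
 */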
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
	if (va != -1)
		kunmap_atomic((void *)va);
#endif
}

static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);

	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.
		 */
		l2_unmap_va(prev_va);
		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}
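/*
 * Invalidate a physical address range.  Partial cache lines at either
 * end are cleaned first so that dirty data outside [start, end) is not
 * thrown away.
 */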
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;	/* to force the first mapping */

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_unmap_va(vaddr);

	dsb();
}
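/* Clean (write back) a physical address range without invalidating it. */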
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	vaddr = -1;	/* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}
/*
 * Optimized L2 flush-all, using set/way operations instead of walking
 * the address space line by line.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r" (set_way));
		}
	}

	dsb();
}
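/* Clean and invalidate (flush) a physical address range. */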
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;	/* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}
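/*
 * Boot-time probe: if this is an XScale3 with an L2 cache fitted and
 * enabled (CR_L2 set in the control register), invalidate it once and
 * register the range operations with the generic outer_cache interface.
 */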
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (get_cr() & CR_L2) {
		pr_info("XScale3 L2 cache enabled.\n");
		xsc3_l2_inv_all();

		outer_cache.inv_range = xsc3_l2_inv_range;
		outer_cache.clean_range = xsc3_l2_clean_range;
		outer_cache.flush_range = xsc3_l2_flush_range;
	}

	return 0;
}
core_initcall(xsc3_l2_init);
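/*
 * Illustrative usage (not part of this file): once the hooks above are
 * registered, generic ARM code reaches this driver through the
 * outer_cache helpers, e.g. a DMA mapping path might issue
 *
 *	outer_inv_range(paddr, paddr + size);
 *
 * which dispatches to xsc3_l2_inv_range() here.
 */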