/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};
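
/*
 * Each controller variant supplies one of these descriptors: __l2c_init()
 * copies ->outer_cache into the global outer_cache operations (optionally
 * adjusted by ->fixup for errata), and ->enable is called with ->num_lock
 * lockdown register pairs to clear before the cache is turned on.
 */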

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
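
/*
 * Flush all dirty data back to memory before turning the controller off;
 * the dsb(st) ensures the disabling write has completed before the caller
 * continues, since writel_relaxed() itself carries no barrier.
 */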
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif
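
/*
 * The 0x03 value written around the errata sequences below sets the DCL
 * (disable cache linefill) and DWB (disable write-back, force write-through)
 * bits of the PL310 debug control register; 0x00 restores normal operation.
 */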

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

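	/*
	 * Work through the range in blocks of at most 4K, dropping and
	 * re-taking the lock between blocks so that interrupt latency is
	 * bounded regardless of the length of the whole range.
	 */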
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned id;

	id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
	if (id == L2X0_CACHE_ID_PART_L310)
		num_lock = 8;
	else
		num_lock = 1;

	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2c_unlock(base, num_lock);

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
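
/*
 * Note that l2x0_enable() derives the number of lockdown registers from
 * the part number itself, ignoring the num_lock argument; the resume
 * path below passes 0 for exactly this reason.
 */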
static void l2x0_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

/*
 * L2C-210 specific code.
 *
 * The L2C-210 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (it is unspecified whether this causes an error).  Thankfully,
 * these are not used on SMP.
 *
 * The L2C-210 never has a sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

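/*
 * Only the misaligned boundary lines need the split clean-then-invalidate
 * under the debug workaround below: erratum 588369 affects clean+invalidate
 * by PA, while the plain invalidate-by-PA used for the aligned body of the
 * range is unaffected.
 */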
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;
static void __init l2x0_of_parse ( const struct device_node * np ,
u32 * aux_val , u32 * aux_mask )
{
u32 data [ 2 ] = { 0 , 0 } ;
u32 tag = 0 ;
u32 dirty = 0 ;
u32 val = 0 , mask = 0 ;
of_property_read_u32 ( np , " arm,tag-latency " , & tag ) ;
if ( tag ) {
mask | = L2X0_AUX_CTRL_TAG_LATENCY_MASK ;
val | = ( tag - 1 ) < < L2X0_AUX_CTRL_TAG_LATENCY_SHIFT ;
}
of_property_read_u32_array ( np , " arm,data-latency " ,
data , ARRAY_SIZE ( data ) ) ;
if ( data [ 0 ] & & data [ 1 ] ) {
mask | = L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK ;
val | = ( ( data [ 0 ] - 1 ) < < L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT ) |
( ( data [ 1 ] - 1 ) < < L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT ) ;
}
of_property_read_u32 ( np , " arm,dirty-latency " , & dirty ) ;
if ( dirty ) {
mask | = L2X0_AUX_CTRL_DIRTY_LATENCY_MASK ;
val | = ( dirty - 1 ) < < L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT ;
}
* aux_val & = ~ mask ;
* aux_val | = val ;
* aux_mask & = ~ mask ;
}
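
/*
 * A device tree fragment like the following (the address is only an
 * example) is what the parser above consumes; latencies are given in
 * cycles and stored 0-based in AUX_CTRL:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <2>;
 *	};
 */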

static const struct l2c_init_data of_l2c210_data __initconst = {
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
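
/*
 * For the PL310 variant the tag and data latencies each take three cycle
 * counts (read, write, setup), and arm,filter-ranges is a <base size>
 * pair that the code above rounds to 1MB granularity; e.g. (values are
 * only an example):
 *
 *	arm,tag-latency = <2 2 1>;
 *	arm,data-latency = <3 3 1>;
 *	arm,filter-ranges = <0x80000000 0x10000000>;
 */
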
static const struct l2c_init_data of_l2c310_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start + 1))
		end = PAGE_ALIGN(start + 1);

	return end;
}
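
/*
 * For example (assuming 4K pages and MAX_RANGE_SIZE >= 32), a range of
 * start = 0x100fe0, end = 0x102000 first returns 0x101000: the operation
 * is clamped at the page boundary and the caller iterates from there.
 */
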
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000 and
 * ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
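
/*
 * For example, a SYS EMI address such as 0x80000000 (section 2) is issued
 * to the L2 as 0x80000000 + 0x40000000 = 0xC0000000, while section 3
 * addresses wrap around through the 0x80000000 offset.
 */
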
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR - 1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR - 1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR - 1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }

static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{ }
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif