// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include "core.h"
2012-05-03 04:56:52 +04:00
/*
 * DCSCB register offsets, relative to the base mapped in dcscb_init().
 * RST_HOLD0/RST_HOLD1 are per-cluster reset-hold registers (the code
 * below indexes them as RST_HOLD0 + cluster * 4).
 */
#define RST_HOLD0	0x0	/* reset hold, cluster 0 */
#define RST_HOLD1	0x4	/* reset hold, cluster 1 */
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30	/* read in dcscb_init() to derive CPU masks */

/* ioremapped base of the DCSCB register block, set once in dcscb_init() */
static void __iomem *dcscb_base;
2012-07-19 00:41:16 +04:00
static int dcscb_allcpus_mask [ 2 ] ;
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
static int dcscb_cpu_powerup ( unsigned int cpu , unsigned int cluster )
2012-05-03 04:56:52 +04:00
{
unsigned int rst_hold , cpumask = ( 1 < < cpu ) ;
pr_debug ( " %s: cpu %u cluster %u \n " , __func__ , cpu , cluster ) ;
2015-03-15 03:29:44 +03:00
if ( cluster > = 2 | | ! ( cpumask & dcscb_allcpus_mask [ cluster ] ) )
2012-05-03 04:56:52 +04:00
return - EINVAL ;
2015-03-15 04:13:48 +03:00
rst_hold = readl_relaxed ( dcscb_base + RST_HOLD0 + cluster * 4 ) ;
rst_hold & = ~ ( cpumask | ( cpumask < < 4 ) ) ;
writel_relaxed ( rst_hold , dcscb_base + RST_HOLD0 + cluster * 4 ) ;
return 0 ;
}
2014-04-14 11:42:01 +04:00
2015-03-15 04:13:48 +03:00
static int dcscb_cluster_powerup ( unsigned int cluster )
{
unsigned int rst_hold ;
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
pr_debug ( " %s: cluster %u \n " , __func__ , cluster ) ;
if ( cluster > = 2 )
return - EINVAL ;
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
/* remove cluster reset and add individual CPU's reset */
rst_hold = readl_relaxed ( dcscb_base + RST_HOLD0 + cluster * 4 ) ;
rst_hold & = ~ ( 1 < < 8 ) ;
rst_hold | = dcscb_allcpus_mask [ cluster ] ;
writel_relaxed ( rst_hold , dcscb_base + RST_HOLD0 + cluster * 4 ) ;
2012-05-03 04:56:52 +04:00
return 0 ;
}
2015-03-15 04:13:48 +03:00
static void dcscb_cpu_powerdown_prepare ( unsigned int cpu , unsigned int cluster )
2012-05-03 04:56:52 +04:00
{
2015-03-15 04:13:48 +03:00
unsigned int rst_hold ;
2012-05-03 04:56:52 +04:00
pr_debug ( " %s: cpu %u cluster %u \n " , __func__ , cpu , cluster ) ;
2015-03-15 04:13:48 +03:00
BUG_ON ( cluster > = 2 | | ! ( ( 1 < < cpu ) & dcscb_allcpus_mask [ cluster ] ) ) ;
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
rst_hold = readl_relaxed ( dcscb_base + RST_HOLD0 + cluster * 4 ) ;
rst_hold | = ( 1 < < cpu ) ;
writel_relaxed ( rst_hold , dcscb_base + RST_HOLD0 + cluster * 4 ) ;
}
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
static void dcscb_cluster_powerdown_prepare ( unsigned int cluster )
{
unsigned int rst_hold ;
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
pr_debug ( " %s: cluster %u \n " , __func__ , cluster ) ;
BUG_ON ( cluster > = 2 ) ;
rst_hold = readl_relaxed ( dcscb_base + RST_HOLD0 + cluster * 4 ) ;
rst_hold | = ( 1 < < 8 ) ;
writel_relaxed ( rst_hold , dcscb_base + RST_HOLD0 + cluster * 4 ) ;
2012-05-03 04:56:52 +04:00
}
2015-03-15 04:13:48 +03:00
/*
 * MCPM cpu_cache_disable hook: flush and disable this CPU's own cache
 * levels (to the level of unification, per the "louis" argument) and
 * take the CPU out of coherency on its way down.
 */
static void dcscb_cpu_cache_disable(void)
{
	/* Disable and flush the local CPU cache. */
	v7_exit_coherency_flush(louis);
}
2012-05-03 04:56:52 +04:00
2015-03-15 04:13:48 +03:00
static void dcscb_cluster_cache_disable ( void )
2012-07-17 06:07:10 +04:00
{
2015-03-15 04:13:48 +03:00
/* Flush all cache levels for this cluster. */
v7_exit_coherency_flush ( all ) ;
2012-07-17 06:07:10 +04:00
2015-03-15 04:13:48 +03:00
/*
* A full outer cache flush could be needed at this point
* on platforms with such a cache , depending on where the
* outer cache sits . In some cases the notion of a " last
* cluster standing " would need to be implemented if the
* outer cache is shared across clusters . In any case , when
* the outer cache needs flushing , there is no concurrent
* access to the cache controller to worry about and no
* special locking besides what is already provided by the
* MCPM state machinery is needed .
*/
2012-07-17 06:07:10 +04:00
2015-03-15 04:13:48 +03:00
/*
* Disable cluster - level coherency by masking
* incoming snoops and DVM messages :
*/
cci_disable_port_by_cpu ( read_cpuid_mpidr ( ) ) ;
2012-07-17 06:07:10 +04:00
}
2015-03-15 04:13:48 +03:00
/*
 * DCSCB backend operations registered with the MCPM layer via
 * mcpm_platform_register() in dcscb_init().
 */
static const struct mcpm_platform_ops dcscb_power_ops = {
	.cpu_powerup		= dcscb_cpu_powerup,
	.cluster_powerup	= dcscb_cluster_powerup,
	.cpu_powerdown_prepare	= dcscb_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
	.cpu_cache_disable	= dcscb_cpu_cache_disable,
	.cluster_cache_disable	= dcscb_cluster_cache_disable,
};
2012-07-17 17:25:44 +04:00
extern void dcscb_power_up_setup ( unsigned int affinity_level ) ;
2012-05-03 04:56:52 +04:00
/*
 * Probe and set up the DCSCB: map its registers from the device tree,
 * derive the per-cluster CPU masks from DCS_CFG_R, register the MCPM
 * platform ops, and point the boot flags register at the MCPM entry
 * vector so secondary entries go through it.
 *
 * Returns 0 on success, -ENODEV if the CCI or DT node is absent,
 * -EADDRNOTAVAIL if the registers cannot be mapped, or the error
 * from MCPM registration.
 *
 * Fix vs. original: of_find_compatible_node() returns the node with
 * an elevated refcount which was never dropped; add of_node_put()
 * once of_iomap() is done with it.
 */
static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	/* the iomap keeps no reference to the node itself: drop ours */
	of_node_put(node);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;

	/*
	 * The nibbles at DCS_CFG_R bits [19:16] and [23:20] appear to
	 * encode the CPU count of cluster 0 and 1 respectively; turn
	 * each count n into an all-CPUs mask (1 << n) - 1.
	 */
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(__pa_symbol(mcpm_entry_point));

	return 0;
}
early_initcall(dcscb_init);