2014-01-15 10:31:52 +00:00
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
# include <linux/errno.h>
2014-04-14 12:04:27 +01:00
# include <linux/percpu.h>
2018-01-19 16:40:49 +01:00
# include <linux/of.h>
# include <linux/of_address.h>
2014-04-14 12:04:27 +01:00
# include <linux/spinlock.h>
2014-01-15 10:31:52 +00:00
2017-08-12 19:49:41 -07:00
# include <asm/mips-cps.h>
2014-01-15 10:31:52 +00:00
void __iomem * mips_cpc_base ;
2014-02-14 09:28:06 +00:00
static DEFINE_PER_CPU_ALIGNED ( spinlock_t , cpc_core_lock ) ;
static DEFINE_PER_CPU_ALIGNED ( unsigned long , cpc_core_lock_flags ) ;
2016-10-15 23:03:43 +01:00
/**
 * mips_cpc_default_phys_base - retrieve the default physical base address
 *                              of the CPC
 *
 * Returns the physical base address of the Cluster Power Controller memory
 * mapped registers as described by a "mti,mips-cpc" device tree node, or 0
 * if no such node exists. Declared __weak so that platforms may override it
 * with their own implementation.
 */
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
	struct device_node *cpc_node;
	struct resource res;
	int err;

	cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
	if (cpc_node) {
		err = of_address_to_resource(cpc_node, 0, &res);
		/*
		 * Drop the reference taken by of_find_compatible_node();
		 * without this the device_node refcount is leaked.
		 */
		of_node_put(cpc_node);
		if (!err)
			return res.start;
	}

	return 0;
}
2015-07-12 18:10:56 -05:00
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
* This function returns the physical base address of the Cluster Power
* Controller memory mapped registers , or 0 if no Cluster Power Controller
* is present .
*/
static phys_addr_t mips_cpc_phys_base ( void )
2014-01-15 10:31:52 +00:00
{
2015-07-09 10:40:46 +01:00
unsigned long cpc_base ;
2014-01-15 10:31:52 +00:00
if ( ! mips_cm_present ( ) )
return 0 ;
2017-08-12 19:49:27 -07:00
if ( ! ( read_gcr_cpc_status ( ) & CM_GCR_CPC_STATUS_EX ) )
2014-01-15 10:31:52 +00:00
return 0 ;
/* If the CPC is already enabled, leave it so */
cpc_base = read_gcr_cpc_base ( ) ;
2017-08-12 19:49:27 -07:00
if ( cpc_base & CM_GCR_CPC_BASE_CPCEN )
return cpc_base & CM_GCR_CPC_BASE_CPCBASE ;
2014-01-15 10:31:52 +00:00
2016-10-15 23:03:43 +01:00
/* Otherwise, use the default address */
2014-01-15 10:31:52 +00:00
cpc_base = mips_cpc_default_phys_base ( ) ;
2016-10-15 23:03:43 +01:00
if ( ! cpc_base )
return cpc_base ;
/* Enable the CPC, mapped at the default address */
2017-08-12 19:49:27 -07:00
write_gcr_cpc_base ( cpc_base | CM_GCR_CPC_BASE_CPCEN ) ;
2014-01-15 10:31:52 +00:00
return cpc_base ;
}
int mips_cpc_probe ( void )
{
2014-11-22 00:22:09 +01:00
phys_addr_t addr ;
2016-09-07 10:45:09 +01:00
unsigned int cpu ;
2014-02-14 09:28:06 +00:00
for_each_possible_cpu ( cpu )
spin_lock_init ( & per_cpu ( cpc_core_lock , cpu ) ) ;
2014-01-15 10:31:52 +00:00
addr = mips_cpc_phys_base ( ) ;
if ( ! addr )
return - ENODEV ;
mips_cpc_base = ioremap_nocache ( addr , 0x8000 ) ;
if ( ! mips_cpc_base )
return - ENXIO ;
return 0 ;
}
2014-02-14 09:28:06 +00:00
void mips_cpc_lock_other ( unsigned int core )
{
2016-09-07 10:45:09 +01:00
unsigned int curr_core ;
2016-09-07 10:45:10 +01:00
if ( mips_cm_revision ( ) > = CM_REV_CM3 )
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
return ;
2014-02-14 09:28:06 +00:00
preempt_disable ( ) ;
2017-08-12 19:49:35 -07:00
curr_core = cpu_core ( & current_cpu_data ) ;
2014-02-14 09:28:06 +00:00
spin_lock_irqsave ( & per_cpu ( cpc_core_lock , curr_core ) ,
per_cpu ( cpc_core_lock_flags , curr_core ) ) ;
2017-08-12 19:49:29 -07:00
write_cpc_cl_other ( core < < __ffs ( CPC_Cx_OTHER_CORENUM ) ) ;
2015-09-22 11:12:18 -07:00
/*
* Ensure the core - other region reflects the appropriate core &
* VP before any accesses to it occur .
*/
mb ( ) ;
2014-02-14 09:28:06 +00:00
}
void mips_cpc_unlock_other ( void )
{
2016-09-07 10:45:10 +01:00
unsigned int curr_core ;
if ( mips_cm_revision ( ) > = CM_REV_CM3 )
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
return ;
2017-08-12 19:49:35 -07:00
curr_core = cpu_core ( & current_cpu_data ) ;
2014-02-14 09:28:06 +00:00
spin_unlock_irqrestore ( & per_cpu ( cpc_core_lock , curr_core ) ,
per_cpu ( cpc_core_lock_flags , curr_core ) ) ;
preempt_enable ( ) ;
}