/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#define DRIVER_NAME		"CCI-400"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)

struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;

#ifdef CONFIG_HW_PERF_EVENTS

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

/* Port ids */
#define CCI_PORT_S0	0
#define CCI_PORT_S1	1
#define CCI_PORT_S2	2
#define CCI_PORT_S3	3
#define CCI_PORT_S4	4
#define CCI_PORT_M0	5
#define CCI_PORT_M1	6
#define CCI_PORT_M2	7

#define CCI_REV_R0		0
#define CCI_REV_R1		1
#define CCI_REV_R1_PX		5
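
/*
 * Each counter occupies its own 4K window in the PMU register space
 * (see CCI_PMU_CNTR_BASE); the register offsets below are relative to
 * the base of that per-counter window.
 */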
#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)

#define CCI_PMU_CNTR_MASK	((1ULL << 32) - 1)

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI_PMU_CYCLES = 0xff
};

#define CCI_PMU_EVENT_MASK		0xff
#define CCI_PMU_EVENT_SOURCE(event)	(((event) >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event)	((event) & 0x1f)

#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */

#define CCI_PMU_CYCLE_CNTR_IDX		0
#define CCI_PMU_CNTR0_IDX		1
#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */
#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11

struct pmu_port_event_ranges {
	u8 slave_min;
	u8 slave_max;
	u8 master_min;
	u8 master_max;
};

static struct pmu_port_event_ranges port_event_range[] = {
	[CCI_REV_R0] = {
		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
	},
	[CCI_REV_R1] = {
		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
	},
};

/*
 * Export different PMU names for the different revisions so that userspace
 * knows which one it is dealing with, since the event ids differ between
 * revisions.
 */
static char *const pmu_names[] = {
	[CCI_REV_R0] = "CCI_400",
	[CCI_REV_R1] = "CCI_400_r1",
};

struct cci_pmu_hw_events {
	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
	raw_spinlock_t pmu_lock;
};

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int irqs[CCI_PMU_MAX_HW_EVENTS];
	unsigned long active_irqs;
	struct pmu_port_event_ranges *port_ranges;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	cpumask_t cpus;
};
static struct cci_pmu *pmu;

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}
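
/* Read the CCI revision from the peripheral ID2 register, bits [7:4]. */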
static int probe_cci_revision(void)
{
	int rev;

	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI_REV_R1_PX)
		return CCI_REV_R0;
	else
		return CCI_REV_R1;
}

static struct pmu_port_event_ranges *port_range_by_rev(void)
{
	int rev = probe_cci_revision();

	return &port_event_range[rev];
}

static int pmu_is_valid_slave_event(u8 ev_code)
{
	return pmu->port_ranges->slave_min <= ev_code &&
		ev_code <= pmu->port_ranges->slave_max;
}

static int pmu_is_valid_master_event(u8 ev_code)
{
	return pmu->port_ranges->master_min <= ev_code &&
		ev_code <= pmu->port_ranges->master_max;
}

static int pmu_validate_hw_event(u8 hw_event)
{
	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);

	switch (ev_source) {
	case CCI_PORT_S0:
	case CCI_PORT_S1:
	case CCI_PORT_S2:
	case CCI_PORT_S3:
	case CCI_PORT_S4:
		/* Slave Interface */
		if (pmu_is_valid_slave_event(ev_code))
			return hw_event;
		break;
	case CCI_PORT_M0:
	case CCI_PORT_M1:
	case CCI_PORT_M2:
		/* Master Interface */
		if (pmu_is_valid_master_event(ev_code))
			return hw_event;
		break;
	}

	return -ENOENT;
}

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(int idx, unsigned int offset)
{
	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_write_register(u32 value, int idx, unsigned int offset)
{
	writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_disable_counter(int idx)
{
	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(int idx)
{
	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(int idx, unsigned long event)
{
	event &= CCI_PMU_EVENT_MASK;
	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
}
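
/* Number of programmable counters advertised in PMCR, plus the fixed cycle counter. */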
static u32 pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}
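
/*
 * Allocate a hardware counter for the event: the dedicated cycle counter
 * for CCI_PMU_CYCLES, otherwise the first free programmable counter.
 */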
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
	int idx;

	if (cci_event == CCI_PMU_CYCLES) {
		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	int mapping;
	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;

	if (event->attr.type < PERF_TYPE_MAX)
		return -ENOENT;

	if (config == CCI_PMU_CYCLES)
		mapping = config;
	else
		mapping = pmu_validate_hw_event(config);

	return mapping;
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < pmu->nr_irqs; i++) {
		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				pmu->irqs[i]);
			return err;
		}

		set_bit(i, &pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &pmu->active_irqs))
			continue;

		free_irq(pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(value, idx, CCI_PMU_CNTR);
}
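
/*
 * Fold the delta since the last read of the hardware counter into the
 * 64-bit event count; the cmpxchg loop makes the read-and-update safe
 * against a concurrent update of prev_count.
 */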
static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}
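
/*
 * Counting is started and stopped globally via the PMCR CEN bit;
 * individual counters remain gated by their CCI_PMU_CNTR_CTRL registers.
 */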
static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
	unsigned long flags;
	u32 val;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
		pmu_set_event(idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
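
/*
 * An event can be scheduled if it is a software event, if it is disabled,
 * or if a free CCI counter can be allocated for it on the fake PMU used
 * for group validation.
 */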
static int
validate_event(struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = CPU_BITS_NONE,
	};

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
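
/* Expose the CPU backing this uncore PMU to userspace via a sysfs "cpumask" attribute. */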
static ssize_t pmu_attr_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};

static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = pmu_names[probe_cci_revision()];

	cci_pmu->pmu = (struct pmu) {
		.name		= name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	cci_pmu->num_events = pmu_get_max_counters();

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
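
/*
 * CPU hotplug notifier: if the CPU currently backing the PMU goes down,
 * hand the PMU over to another online CPU.
 */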
static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids) /* UP, last CPU */
			break;
		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &pmu->cpus);
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cci_pmu_cpu_nb = {
	.notifier_call	= cci_pmu_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static const struct of_device_id arm_cci_pmu_matches[] = {
	{
		.compatible = "arm,cci-400-pmu",
	},
	{},
};

static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	int i, ret, irq;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pmu->base))
		return -ENOMEM;

	/*
	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
	 * together to a common interrupt.
	 */
	pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
			continue;

		pmu->irqs[pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_EVENTS) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			i, CCI_PMU_MAX_HW_EVENTS);
		return -EINVAL;
	}

	pmu->port_ranges = port_range_by_rev();
	if (!pmu->port_ranges) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return -EINVAL;
	}

	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
	mutex_init(&pmu->reserve_mutex);
	atomic_set(&pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &pmu->cpus);

	ret = register_cpu_notifier(&cci_pmu_cpu_nb);
	if (ret)
		return ret;

	ret = cci_pmu_init(pmu, pdev);
	if (ret)
		return ret;

	return 0;
}

static int cci_platform_probe(struct platform_device *pdev)
{
	if (!cci_probed())
		return -ENODEV;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}

#endif /* CONFIG_HW_PERF_EVENTS */
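
/*
 * Per-CPU record mapping a CPU's MPIDR to the index of the CCI slave port
 * its cluster is connected to; populated once by cci_ace_init_ports().
 */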
struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)

static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];

/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 *
 * @dn: device node of the device to look-up
 * @type: port type
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);

static void cci_ace_init_ports(void)
{
	int port, cpu;
	struct device_node *cpun;

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))
			continue;

		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}

/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and
 * after the caches have been cleaned and invalidated, the functions provide
 * no explicit locking: they may run with the D-cache disabled, so normal
 * cacheable kernel locks based on ldrex/strex may not work.
 * Locking has to be provided by BSP implementations to ensure proper
 * operations.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}

/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);

/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
"	.arch armv7-a\n"
"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value\n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f\n"
"	ldr	r2, [r1]\n"
"	add	r1, r1, r2		@ &cpu_port\n"
"	add	ip, r1, %[sizeof_cpu_port]\n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]]\n"
"	cmp	r2, r0			@ compare MPIDR\n"
"	bne	2f\n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]]\n"
"	tst	r3, #"__stringify(PORT_VALID)"\n"
"	bne	3f\n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port]\n"
"	cmp	r1, ip			@ done?\n"
"	blo	1b\n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found:\n"
"	wfi\n"
"	wfe\n"
"	b	cci_port_not_found\n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)"\n"
"	adr	r0, 6f\n"
"	ldmia	r0, {r1, r2}\n"
"	sub	r1, r1, r0		@ virt - phys\n"
"	ldr	r0, [r0, r2]		@ *(&ports)\n"
"	mov	r2, %[sizeof_struct_ace_port]\n"
"	mla	r0, r2, r3, r0		@ &ports[index]\n"
"	sub	r0, r0, r1		@ virt_to_phys()\n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]]\n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"]\n"

	/* poll the status reg for completion */
"	adr	r1, 7f\n"
"	ldr	r0, [r1]\n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base\n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"]\n"
"	tst	r1, %[cci_control_status_bits]\n"
"	bne	4b\n"

"	mov	r0, #0\n"
"	bx	lr\n"

"	.align	2\n"
"5:	.word	cpu_port - .\n"
"6:	.word	.\n"
"	.word	ports - 6b\n"
"7:	.word	cci_ctrl_phys - .\n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr) + 4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)));

	unreachable();
}

/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);

/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu()); control by general purpose
	 * indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

static const struct of_device_id arm_cci_matches[] = {
	{ .compatible = "arm,cci-400", .data = &cci400_ports },
	{},
};

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{ .compatible = "arm,cci-400-ctrl-if", },
	{},
};
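
/*
 * Probe the CCI from the device tree: map the control interface registers,
 * then map and classify each ACE/ACE-lite slave interface child node.
 */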
static int cci_probe(void)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *np, *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np)
		return -ENODEV;

	if (!of_device_is_available(np))
		return -ENODEV;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		ret = -ENXIO;
		goto memalloc_err;
	}

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					&match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				  cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
					cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");
	return 0;

memalloc_err:

	kfree(ports);
	return ret;
}

static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}

#ifdef CONFIG_HW_PERF_EVENTS
static struct platform_driver cci_pmu_driver = {
	.driver = {
		   .name = DRIVER_NAME_PMU,
		   .of_match_table = arm_cci_pmu_matches,
		  },
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = arm_cci_matches,
		  },
	.probe = cci_platform_probe,
};

static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}

#else

static int __init cci_platform_init(void)
{
	return 0;
}

#endif

/*
 * To sort out early init calls ordering a helper function is provided to
 * check if the CCI driver has been initialized. The function checks if the
 * driver has been initialized, and if not it calls the init function that
 * probes the driver and updates the return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);

early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");