// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

/*
 * tmc_etr_enable_hw - program the TMC-ETR and start trace capture.
 *
 * Zeroes the trace buffer, then programs the capture registers (buffer
 * size, circular mode, AXI attributes, buffer base address, flush
 * control, trigger count) and enables the TMC.  The register write
 * sequence is order-sensitive; CS_UNLOCK/CS_LOCK open and close the
 * CoreSight software lock around it.
 *
 * Caller must hold drvdata->spinlock and guarantee drvdata->vaddr is a
 * valid buffer of drvdata->size bytes.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ counts 32-bit words, hence size / 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* Read-modify-write the AXI control: protection, burst and cache */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	/* Some implementations have a separate read-allocate cache field */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etr_dump_hw - locate the captured trace data in the buffer.
 *
 * Reads the write pointer (RWP) and status register to set
 * drvdata::buf to the start of the trace data and drvdata::len to the
 * amount available.  If the circular buffer wrapped (STS full bit set),
 * the oldest data begins at RWP and the first words are overwritten
 * with barrier packets so a decoder can resynchronise at that point.
 *
 * Must be called with the TMC stopped (see tmc_etr_disable_hw()).
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		/* Wrapped: oldest data sits at the write pointer offset */
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

		/* Insert barrier packets at the wrap point for decoders */
		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}

	} else {
		/* No wrap: data runs from buffer base up to RWP */
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}
/*
 * tmc_etr_disable_hw - flush and stop trace capture.
 *
 * Flushes outstanding trace, harvests the buffer state while the
 * hardware is still accessible (sysFS mode only), then disables the
 * TMC.  The dump must happen between the flush and the disable.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR for a sysFS-driven session.
 *
 * Allocates the coherent DMA trace buffer on first use (the lock is
 * dropped for the allocation since dma_alloc_coherent() can sleep),
 * then claims the sink for sysFS mode and starts the hardware.  If the
 * sink is already enabled in sysFS mode the extra writer simply shares
 * it and the hardware is left alone.
 *
 * Returns 0 on success, -ENOMEM if the buffer allocation fails or
 * -EBUSY while user space is reading out the buffer.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::vaddr == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->vaddr == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
static int tmc_enable_etr_sink_perf ( struct coresight_device * csdev )
2016-05-03 20:33:56 +03:00
{
int ret = 0 ;
unsigned long flags ;
struct tmc_drvdata * drvdata = dev_get_drvdata ( csdev - > dev . parent ) ;
spin_lock_irqsave ( & drvdata - > spinlock , flags ) ;
if ( drvdata - > reading ) {
ret = - EINVAL ;
goto out ;
}
/*
* In Perf mode there can be only one writer per sink . There
* is also no need to continue if the ETR is already operated
* from sysFS .
*/
2016-11-29 19:47:15 +03:00
if ( drvdata - > mode ! = CS_MODE_DISABLED ) {
2016-05-03 20:33:56 +03:00
ret = - EINVAL ;
goto out ;
}
2016-11-29 19:47:15 +03:00
drvdata - > mode = CS_MODE_PERF ;
2016-05-03 20:33:56 +03:00
tmc_etr_enable_hw ( drvdata ) ;
out :
spin_unlock_irqrestore ( & drvdata - > spinlock , flags ) ;
return ret ;
}
/*
 * tmc_enable_etr_sink - dispatch sink enable to the mode-specific path.
 *
 * Only sysFS and Perf modes are valid; any other value is rejected.
 */
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	if (mode == CS_MODE_SYSFS)
		return tmc_enable_etr_sink_sysfs(csdev);

	if (mode == CS_MODE_PERF)
		return tmc_enable_etr_sink_perf(csdev);

	/* We shouldn't be here */
	return -EINVAL;
}
static void tmc_disable_etr_sink ( struct coresight_device * csdev )
{
unsigned long flags ;
struct tmc_drvdata * drvdata = dev_get_drvdata ( csdev - > dev . parent ) ;
spin_lock_irqsave ( & drvdata - > spinlock , flags ) ;
if ( drvdata - > reading ) {
spin_unlock_irqrestore ( & drvdata - > spinlock , flags ) ;
return ;
}
2016-05-03 20:33:54 +03:00
/* Disable the TMC only if it needs to */
2016-11-29 19:47:15 +03:00
if ( drvdata - > mode ! = CS_MODE_DISABLED ) {
2016-05-03 20:33:54 +03:00
tmc_etr_disable_hw ( drvdata ) ;
2016-11-29 19:47:15 +03:00
drvdata - > mode = CS_MODE_DISABLED ;
}
2016-05-03 20:33:54 +03:00
2016-05-03 20:33:50 +03:00
spin_unlock_irqrestore ( & drvdata - > spinlock , flags ) ;
dev_info ( drvdata - > dev , " TMC-ETR disabled \n " ) ;
}
/* Sink callbacks wired into the coresight core via tmc_etr_cs_ops */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

/* Top-level coresight operations for the ETR configuration of the TMC */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
int tmc_read_prepare_etr ( struct tmc_drvdata * drvdata )
{
2016-05-03 20:33:52 +03:00
int ret = 0 ;
2016-05-03 20:33:51 +03:00
unsigned long flags ;
/* config types are set a boot time and never change */
if ( WARN_ON_ONCE ( drvdata - > config_type ! = TMC_CONFIG_TYPE_ETR ) )
return - EINVAL ;
spin_lock_irqsave ( & drvdata - > spinlock , flags ) ;
2016-05-03 20:33:53 +03:00
if ( drvdata - > reading ) {
ret = - EBUSY ;
goto out ;
}
2016-05-03 20:33:51 +03:00
2016-05-03 20:33:56 +03:00
/* Don't interfere if operated from Perf */
2016-11-29 19:47:15 +03:00
if ( drvdata - > mode = = CS_MODE_PERF ) {
2016-05-03 20:33:56 +03:00
ret = - EINVAL ;
goto out ;
}
2016-05-03 20:33:52 +03:00
/* If drvdata::buf is NULL the trace data has been read already */
if ( drvdata - > buf = = NULL ) {
ret = - EINVAL ;
goto out ;
}
2016-05-03 20:33:51 +03:00
/* Disable the TMC if need be */
2016-11-29 19:47:15 +03:00
if ( drvdata - > mode = = CS_MODE_SYSFS )
2016-05-03 20:33:51 +03:00
tmc_etr_disable_hw ( drvdata ) ;
drvdata - > reading = true ;
2016-05-03 20:33:52 +03:00
out :
2016-05-03 20:33:51 +03:00
spin_unlock_irqrestore ( & drvdata - > spinlock , flags ) ;
2016-05-03 20:33:56 +03:00
return ret ;
2016-05-03 20:33:51 +03:00
}
int tmc_read_unprepare_etr ( struct tmc_drvdata * drvdata )
{
unsigned long flags ;
2016-05-03 20:33:52 +03:00
dma_addr_t paddr ;
void __iomem * vaddr = NULL ;
2016-05-03 20:33:51 +03:00
/* config types are set a boot time and never change */
if ( WARN_ON_ONCE ( drvdata - > config_type ! = TMC_CONFIG_TYPE_ETR ) )
return - EINVAL ;
spin_lock_irqsave ( & drvdata - > spinlock , flags ) ;
/* RE-enable the TMC if need be */
2016-11-29 19:47:15 +03:00
if ( drvdata - > mode = = CS_MODE_SYSFS ) {
2016-05-03 20:33:52 +03:00
/*
* The trace run will continue with the same allocated trace
2016-06-14 20:17:14 +03:00
* buffer . The trace buffer is cleared in tmc_etr_enable_hw ( ) ,
* so we don ' t have to explicitly clear it . Also , since the
* tracer is still enabled drvdata : : buf can ' t be NULL .
2016-05-03 20:33:52 +03:00
*/
2016-05-03 20:33:51 +03:00
tmc_etr_enable_hw ( drvdata ) ;
2016-05-03 20:33:52 +03:00
} else {
/*
* The ETR is not tracing and the buffer was just read .
* As such prepare to free the trace buffer .
*/
vaddr = drvdata - > vaddr ;
paddr = drvdata - > paddr ;
2016-06-14 20:17:13 +03:00
drvdata - > buf = drvdata - > vaddr = NULL ;
2016-05-03 20:33:52 +03:00
}
2016-05-03 20:33:51 +03:00
drvdata - > reading = false ;
spin_unlock_irqrestore ( & drvdata - > spinlock , flags ) ;
2016-05-03 20:33:52 +03:00
/* Free allocated memory out side of the spinlock */
if ( vaddr )
dma_free_coherent ( drvdata - > dev , drvdata - > size , vaddr , paddr ) ;
2016-05-03 20:33:51 +03:00
return 0 ;
}