// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"

#define ETB_RAM_DEPTH_REG	0x004
#define ETB_STATUS_REG		0x00c
#define ETB_RAM_READ_DATA_REG	0x010
#define ETB_RAM_READ_POINTER	0x014
#define ETB_RAM_WRITE_POINTER	0x018
#define ETB_TRG			0x01c
#define ETB_CTL_REG		0x020
#define ETB_RWD_REG		0x024
#define ETB_FFSR		0x300
#define ETB_FFCR		0x304
#define ETB_ITMISCOP0		0xee0
#define ETB_ITTRFLINACK		0xee4
#define ETB_ITTRFLIN		0xee8
#define ETB_ITATBDATA0		0xeeC
#define ETB_ITATBCTR2		0xef0
#define ETB_ITATBCTR1		0xef4
#define ETB_ITATBCTR0		0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL	BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN		BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC		BIT(0)
#define ETB_FFCR_FON_MAN	BIT(6)
#define ETB_FFCR_STOP_FI	BIT(12)
#define ETB_FFCR_STOP_TRIGGER	BIT(13)

#define ETB_FFCR_BIT		6
#define ETB_FFSR_BIT		1
#define ETB_FRAME_SIZE_WORDS	4

DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");

/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:	memory mapped base address for this component.
 * @atclk:	optional clock for the core parts of the ETB.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:	serialise accesses to this component.
 * @reading:	synchronise user space access to the etb buffer.
 * @pid:	Process ID of the process being monitored by the session
 *		that is using this component.
 * @buf:	area of memory where ETB buffer content gets sent.
 * @mode:	mode in which this ETB is being used (sysfs or perf).
 * @buffer_depth: size of @buf, in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct etb_drvdata {
	void __iomem		*base;
	struct clk		*atclk;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	local_t			reading;
	pid_t			pid;
	u8			*buf;
	u32			mode;
	u32			buffer_depth;
	u32			trigger_cntr;
};

static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle);

static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
	return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}

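/*
 * __etb_enable_hw - program the ETB for a new capture session: clear the
 * trace RAM, reset the read/write pointers, set the trigger counter and
 * the formatter/flush control register, then set the capture enable bit.
 */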
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
	int i;
	u32 depth;

	CS_UNLOCK(drvdata->base);

	depth = drvdata->buffer_depth;
	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* clear entire RAM buffer */
	for (i = 0; i < depth; i++)
		writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* reset read RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
	writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
		       drvdata->base + ETB_FFCR);
	/* ETB trace capture enable */
	writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

	CS_LOCK(drvdata->base);
}

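/*
 * etb_enable_hw - claim the device from the CoreSight framework and, if
 * that succeeds, program the hardware for trace capture.
 */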
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__etb_enable_hw(drvdata);
	return 0;
}

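/*
 * etb_enable_sysfs - enable the sink on behalf of the sysfs interface.
 * Fails if a perf session currently owns the ETB; otherwise the hardware
 * is only programmed on the first enable and a reference is taken for
 * each additional user.
 */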
static int etb_enable_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't mess with perf sessions. */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	if (drvdata->mode == CS_MODE_DISABLED) {
		ret = etb_enable_hw(drvdata);
		if (ret)
			goto out;

		drvdata->mode = CS_MODE_SYSFS;
	}

	atomic_inc(csdev->refcnt);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

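/*
 * etb_enable_perf - enable the sink on behalf of a perf session.  The ETB
 * can only serve one owner at a time: the call fails with -EBUSY if the
 * sink is driven via sysfs or already monitoring a different process.
 */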
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* No need to continue if the component is already in use via sysfs. */
	if (drvdata->mode == CS_MODE_SYSFS) {
		ret = -EBUSY;
		goto out;
	}

	/* Get a handle on the pid of the process to monitor */
	pid = task_pid_nr(handle->event->owner);

	if (drvdata->pid != -1 && drvdata->pid != pid) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * We don't have an internal state to clean up if we fail to setup
	 * the perf buffer.  So we can perform the step before we turn the
	 * ETB on and leave without cleaning up.
	 */
	ret = etb_set_buffer(csdev, handle);
	if (ret)
		goto out;

	ret = etb_enable_hw(drvdata);
	if (!ret) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		atomic_inc(csdev->refcnt);
	}

out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "ETB enabled\n");
	return 0;
}

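/*
 * __etb_disable_hw - stop trace capture: request a manual flush of the
 * formatter, wait for it to complete, then clear the capture enable bit
 * and wait for the formatter to stop.  Timeouts are logged but not fatal.
 */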
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
	u32 ffcr;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
	/* stop formatter when a stop has completed */
	ffcr |= ETB_FFCR_STOP_FI;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
	/* manually generate a flush of the system */
	ffcr |= ETB_FFCR_FON_MAN;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

	if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
		dev_err(dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	/* disable trace capture */
	writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

	if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
		dev_err(dev,
			"timeout while waiting for Formatter to Stop\n");
	}

	CS_LOCK(drvdata->base);
}

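/*
 * etb_dump_hw - drain the ETB trace RAM into drvdata->buf.  When the RAM
 * has wrapped around, reading starts at the write pointer so the oldest
 * data comes out first, and a barrier packet is inserted to flag the loss.
 */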
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
	bool lost = false;
	int i;
	u8 *buf_ptr;
	u32 read_data, depth;
	u32 read_ptr, write_ptr;
	u32 frame_off, frame_endoff;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
	frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
	if (frame_off) {
		dev_err(dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);
		dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
			(unsigned long)frame_off, (unsigned long)frame_endoff);
		write_ptr += frame_endoff;
	}

	if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
		      & ETB_STATUS_RAM_FULL) == 0) {
		writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	} else {
		writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
		lost = true;
	}

	depth = drvdata->buffer_depth;
	buf_ptr = drvdata->buf;
	for (i = 0; i < depth; i++) {
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);

	if (frame_off) {
		buf_ptr -= (frame_endoff * 4);
		for (i = 0; i < frame_endoff; i++) {
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
		}
	}

	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	CS_LOCK(drvdata->base);
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
	__etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}

static int etb_disable(struct coresight_device *csdev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "ETB disabled\n");
	return 0;
}

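/*
 * etb_alloc_buffer - allocate the cs_buffers bookkeeping structure used by
 * the perf path, on the NUMA node of the CPU the event is bound to when
 * there is one.
 */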
static void *etb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void etb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

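/*
 * etb_update_buffer - copy the content of the ETB trace RAM into the perf
 * AUX ring buffer when a session stops.  Capture is paused, the amount of
 * new data is derived from the read/write pointers and the RAM full flag,
 * and the data (prefixed with a barrier packet if anything was lost) is
 * copied out page by page before the ETB is re-armed.
 */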
static unsigned long etb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	bool lost = false;
	int i, cur;
	u8 *buf_ptr;
	const u32 *barrier;
	u32 read_ptr, write_ptr, capacity;
	u32 status, read_data;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	__etb_disable_hw(drvdata);
	CS_UNLOCK(drvdata->base);

	/* unit is in words, not bytes */
	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * Entries should be aligned to the frame size.  If they are not
	 * go back to the last alignment point to give decoding tools a
	 * chance to fix things.
	 */
	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
		dev_err(&csdev->dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);

		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
		lost = true;
	}

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.  Otherwise
	 * start at the beginning and go until the write pointer has
	 * been reached.
	 */
	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
	if (status & ETB_STATUS_RAM_FULL) {
		lost = true;
		to_read = capacity;
		read_ptr = write_ptr;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
		to_read *= ETB_FRAME_SIZE_WORDS;
	}

	/*
	 * Make sure we don't overwrite data that hasn't been consumed yet.
	 * It is entirely possible that the HW buffer has more data than the
	 * ring buffer can currently handle.  If so adjust the start address
	 * to take only the last traces.
	 *
	 * In snapshot mode we are looking to get the latest traces only and as
	 * such, we don't care about not overwriting data that hasn't been
	 * processed by user space.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

		/* The new read pointer must be frame size aligned */
		to_read = handle->size & mask;
		/*
		 * Move the RAM read pointer up, keeping in mind that
		 * everything is in frame size units.
		 */
		read_ptr = (write_ptr + drvdata->buffer_depth) -
				to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
		if (read_ptr > (drvdata->buffer_depth - 1))
			read_ptr -= drvdata->buffer_depth;
		/* let the decoder know we've skipped ahead */
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* finally tell HW where we want to start reading from */
	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			read_data = *barrier;
			barrier++;
		}

		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* reset ETB buffer for next run */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space function cs_etm_find_snapshot()
	 * will figure out how many bytes to get from the AUX buffer based on
	 * the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	__etb_enable_hw(drvdata);
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink etb_sink_ops = {
	.enable		= etb_enable,
	.disable	= etb_disable,
	.alloc_buffer	= etb_alloc_buffer,
	.free_buffer	= etb_free_buffer,
	.update_buffer	= etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
	.sink_ops	= &etb_sink_ops,
};

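/*
 * etb_dump - when the ETB is enabled via sysfs, briefly stop capture,
 * snapshot the trace RAM into drvdata->buf and restart capture.  Used by
 * the "/dev/xyz.etb" read path.
 */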
static void etb_dump(struct etb_drvdata *drvdata)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_SYSFS) {
		__etb_disable_hw(drvdata);
		etb_dump_hw(drvdata);
		__etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}

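/*
 * File operations backing the "/dev/xyz.etb" misc device.  Only one reader
 * is allowed at a time; each read refreshes drvdata->buf from the hardware
 * (via etb_dump()) and copies it out to user space.
 */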
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (local_cmpxchg(&drvdata->reading, 0, 1))
		return -EBUSY;

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
			size_t len, loff_t *ppos)
{
	u32 depth;
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	struct device *dev = &drvdata->csdev->dev;

	etb_dump(drvdata);

	depth = drvdata->buffer_depth;
	if (*ppos + len > depth * 4)
		len = depth * 4 - *ppos;

	if (copy_to_user(data, drvdata->buf + *ppos, len)) {
		dev_dbg(dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(depth * 4 - *ppos));
	return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	local_set(&drvdata->reading, 0);

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations etb_fops = {
	.owner		= THIS_MODULE,
	.open		= etb_open,
	.read		= etb_read,
	.release	= etb_release,
	.llseek		= no_llseek,
};

#define coresight_etb10_reg(name, offset)			\
	coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
	&dev_attr_rdp.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	NULL,
};

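/*
 * "trigger_cntr" sysfs attribute: number of 32-bit words the ETB keeps
 * capturing after a trigger event.  The value is programmed into the
 * ETB_TRG register when the sink is enabled.
 */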
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
};

static const struct attribute_group coresight_etb_group = {
	.attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
	.attrs = coresight_etb_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_etb_groups[] = {
	&coresight_etb_group,
	&coresight_etb_mgmt_group,
	NULL,
};

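/*
 * etb_probe - discover the trace RAM depth, allocate the shadow buffer,
 * register the device with the CoreSight framework and expose the
 * "/dev/xyz.etb" misc device.
 */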
static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etb_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	desc.name = coresight_alloc_device_name(&etb_devs, dev);
	if (!desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}
	dev_set_drvdata(dev, drvdata);

	/* validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);

	if (drvdata->buffer_depth & 0x80000000)
		return -EINVAL;

	drvdata->buf = devm_kcalloc(dev,
				    drvdata->buffer_depth, 4, GFP_KERNEL);
	if (!drvdata->buf)
		return -ENOMEM;

	/* This device is not associated with a session */
	drvdata->pid = -1;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &etb_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etb_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &etb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	pm_runtime_put(&adev->dev);
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
	return ret;
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etb_runtime_resume(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static const struct amba_id etb_ids[] = {
	{
		.id	= 0x000bb907,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver etb_driver = {
	.drv = {
		.name	= "coresight-etb10",
		.owner	= THIS_MODULE,
		.pm	= &etb_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etb_probe,
	.id_table	= etb_ids,
};

builtin_amba_driver(etb_driver);