/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
/* For debugging crashes, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
 *
 * to log the cmdstream in a format that is understood by the freedreno/cffdump
 * utility.  By comparing the last successfully completed fence # to the
 * cmdstream for the next fence, you can narrow down which process and submit
 * caused the gpu crash/lockup.
 *
 * Additionally:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
 *
 * will capture just the cmdstream from submits which triggered a GPU hang.
 *
 * This bypasses drm_debugfs_create_files() mainly because we need to use
 * our own fops for a bit more control.  In particular, we don't want to
 * do anything if userspace doesn't have the debugfs file open.
 *
 * The module-param "rd_full", which defaults to false, enables snapshotting
 * all (non-written) buffers in the submit, rather than just cmdstream bo's.
 * This is useful to capture the contents of (for example) vbo's or textures,
 * or shader programs (if not emitted inline in cmdstream).
 */
#ifdef CONFIG_DEBUG_FS

#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/wait.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

static bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
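
/* Example: with mode 0600 the parameter can also be flipped at runtime
 * through sysfs (path assumes the driver is built as the "msm" module):
 *
 *   echo Y > /sys/module/msm/parameters/rd_full
 */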

enum rd_sect_type {
	RD_NONE,
	RD_TEST,           /* ascii text */
	RD_CMD,            /* ascii text */
	RD_GPUADDR,        /* u32 gpuaddr, u32 size */
	RD_CONTEXT,        /* raw dump */
	RD_CMDSTREAM,      /* raw dump */
	RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
	RD_PARAM,          /* u32 param_type, u32 param_val, u32 bitlen */
	RD_FLUSH,          /* empty, clear previous params */
	RD_PROGRAM,        /* shader program, raw dump */
	RD_VERT_SHADER,
	RD_FRAG_SHADER,
	RD_BUFFER_CONTENTS,
	RD_GPU_ID,
};

#define BUF_SZ 512  /* should be power of 2 */

/* space used: */
#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
/* space available: */
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))

struct msm_rd_state {
	struct drm_device *dev;

	bool open;

	/* current submit to read out: */
	struct msm_gem_submit *submit;

	/* fifo access is synchronized on the producer side by
	 * struct_mutex held by submit code (otherwise we could
	 * end up w/ cmds logged in different order than they
	 * were executed).  And read_lock synchronizes the reads
	 */
	struct mutex read_lock;

	wait_queue_head_t fifo_event;
	struct circ_buf fifo;

	char buf[BUF_SZ];
};
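
/* Producer side of the fifo: copy 'sz' bytes into the circular buffer,
 * sleeping until the reader has made enough space.  Producers are
 * serialized by struct_mutex (see the msm_rd_state comment above).
 */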
static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
	struct circ_buf *fifo = &rd->fifo;
	const char *ptr = buf;

	while (sz > 0) {
		char *fptr = &fifo->buf[fifo->head];
		int n;

		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);

		/* Note that smp_load_acquire() is not strictly required
		 * as CIRC_SPACE_TO_END() does not access the tail more
		 * than once.
		 */
		n = min(sz, circ_space_to_end(&rd->fifo));
		memcpy(fptr, ptr, n);

		smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));

		sz  -= n;
		ptr += n;

		wake_up_all(&rd->fifo_event);
	}
}
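
/* Each record written into the stream is framed as:
 *
 *   u32 type   (enum rd_sect_type)
 *   u32 size   (payload length in bytes)
 *   payload    ('size' raw bytes)
 *
 * which is the layout the rd parsing tools expect.
 */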
static void rd_write_section(struct msm_rd_state *rd,
		enum rd_sect_type type, const void *buf, int sz)
{
	rd_write(rd, &type, 4);
	rd_write(rd, &sz, 4);
	rd_write(rd, buf, sz);
}
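
/* Consumer side of the fifo: block (interruptibly) until data is available,
 * then copy at most one contiguous chunk to userspace.  read_lock keeps
 * concurrent readers from interleaving.
 */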
static ssize_t rd_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_rd_state *rd = file->private_data;
	struct circ_buf *fifo = &rd->fifo;
	const char *fptr = &fifo->buf[fifo->tail];
	int n = 0, ret = 0;

	mutex_lock(&rd->read_lock);

	ret = wait_event_interruptible(rd->fifo_event,
			circ_count(&rd->fifo) > 0);
	if (ret)
		goto out;

	/* Note that smp_load_acquire() is not strictly required
	 * as CIRC_CNT_TO_END() does not access the head more than
	 * once.
	 */
	n = min_t(int, sz, circ_count_to_end(&rd->fifo));
	if (copy_to_user(buf, fptr, n)) {
		ret = -EFAULT;
		goto out;
	}

	smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
	*ppos += n;

	wake_up_all(&rd->fifo_event);

out:
	mutex_unlock(&rd->read_lock);
	if (ret)
		return ret;
	return n;
}
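
/* Only a single reader at a time (and only once a GPU has been probed);
 * the first record written into a fresh capture is the RD_GPU_ID section,
 * so the parsing tools can load the right register database.
 */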
static int rd_open(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;
	struct drm_device *dev = rd->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	uint64_t val;
	uint32_t gpu_id;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (rd->open || !gpu) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = rd;
	rd->open = true;

	/* the parsing tools need to know gpu-id to know which
	 * register database to load.
	 */
	gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
	gpu_id = val;

	rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int rd_release(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;

	rd->open = false;

	return 0;
}

static const struct file_operations rd_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rd_open,
	.read = rd_read,
	.llseek = no_llseek,
	.release = rd_release,
};
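
/* rd_init()/rd_cleanup() manage the state for a single debugfs node; the
 * same helpers back both the "rd" and "hangrd" files created below.
 */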
static void rd_cleanup(struct msm_rd_state *rd)
{
	if (!rd)
		return;

	mutex_destroy(&rd->read_lock);
	kfree(rd);
}

static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
	struct msm_rd_state *rd;
	struct dentry *ent;
	int ret = 0;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return ERR_PTR(-ENOMEM);

	rd->dev = minor->dev;
	rd->fifo.buf = rd->buf;

	mutex_init(&rd->read_lock);

	init_waitqueue_head(&rd->fifo_event);

	ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
			minor->debugfs_root, rd, &rd_debugfs_fops);
	if (!ent) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
				minor->debugfs_root, name);
		ret = -ENOMEM;
		goto fail;
	}

	return rd;

fail:
	rd_cleanup(rd);
	return ERR_PTR(ret);
}
int msm_rd_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_rd_state *rd;
	int ret;

	/* only create on first minor: */
	if (priv->rd)
		return 0;

	rd = rd_init(minor, "rd");
	if (IS_ERR(rd)) {
		ret = PTR_ERR(rd);
		goto fail;
	}

	priv->rd = rd;

	rd = rd_init(minor, "hangrd");
	if (IS_ERR(rd)) {
		ret = PTR_ERR(rd);
		goto fail;
	}

	priv->hangrd = rd;

	return 0;

fail:
	msm_rd_debugfs_cleanup(priv);
	return ret;
}

void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
	rd_cleanup(priv->rd);
	priv->rd = NULL;

	rd_cleanup(priv->hangrd);
	priv->hangrd = NULL;
}
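
/* Write an RD_GPUADDR record for one bo in the submit, plus (only for
 * buffers marked READ) an RD_BUFFER_CONTENTS record with its contents.
 * Passing iova==0 snapshots the whole buffer.
 */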
static void snapshot_buf(struct msm_rd_state *rd,
		struct msm_gem_submit *submit, int idx,
		uint64_t iova, uint32_t size)
{
	struct msm_gem_object *obj = submit->bos[idx].obj;
	unsigned offset = 0;
	const char *buf;

	if (iova) {
		offset = iova - submit->bos[idx].iova;
	} else {
		iova = submit->bos[idx].iova;
		size = obj->base.size;
	}

	/*
	 * Always write the GPUADDR header so we can get a complete list of
	 * all the buffers in the cmd
	 */
	rd_write_section(rd, RD_GPUADDR,
			(uint32_t[3]){ iova, size, iova >> 32 }, 12);

	/* But only dump the contents of buffers marked READ */
	if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
		return;

	buf = msm_gem_get_vaddr_active(&obj->base);
	if (IS_ERR(buf))
		return;

	/* apply any partial-snapshot offset after mapping, not before: */
	buf += offset;

	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);

	msm_gem_put_vaddr(&obj->base);
}
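
/* The same dump routine backs both debugfs files: callers pass either
 * priv->rd (which, while open, sees every submit) or priv->hangrd (which,
 * per the comment at the top of this file, sees only submits that
 * triggered a GPU hang), along with an optional fmt string that is
 * emitted as a leading RD_CMD section.
 */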
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...)
{
	struct drm_device *dev = submit->dev;
	struct task_struct *task;
	char msg[256];
	int i, n;

	if (!rd->open)
		return;

	/* writing into fifo is serialized by caller, and
	 * rd->read_lock is used to serialize the reads
	 */
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		n = vsnprintf(msg, sizeof(msg), fmt, args);
		va_end(args);

		rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
	}

	rcu_read_lock();
	task = pid_task(submit->pid, PIDTYPE_PID);
	if (task) {
		n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
				TASK_COMM_LEN, task->comm,
				pid_nr(submit->pid), submit->seqno);
	} else {
		n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
				pid_nr(submit->pid), submit->seqno);
	}
	rcu_read_unlock();

	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));

	for (i = 0; rd_full && i < submit->nr_bos; i++)
		snapshot_buf(rd, submit, i, 0, 0);

	for (i = 0; i < submit->nr_cmds; i++) {
		uint64_t iova = submit->cmd[i].iova;
		uint32_t szd  = submit->cmd[i].size; /* in dwords */

		/* snapshot cmdstream bo's (if we haven't already): */
		if (!rd_full) {
			snapshot_buf(rd, submit, submit->cmd[i].idx,
					submit->cmd[i].iova, szd * 4);
		}

		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets, we've logged the buffer, the
			 * parser tool will follow the IB based on the logged
			 * buffer/gpuaddr, so nothing more to do.
			 */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
		case MSM_SUBMIT_CMD_BUF:
			rd_write_section(rd, RD_CMDSTREAM_ADDR,
				(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
			break;
		}
	}
}

#endif