/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* For debugging crashes, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
 *
 * to log the cmdstream in a format understood by the freedreno/cffdump
 * utility.  By comparing the last successfully completed fence # to the
 * cmdstream for the next fence, you can narrow down which process and
 * submit caused the gpu crash/lockup.
 *
 * This bypasses drm_debugfs_create_files() mainly because we need to use
 * our own fops for a bit more control.  In particular, we don't want to
 * do anything if userspace doesn't have the debugfs file open.
 *
 * The module-param "rd_full", which defaults to false, enables snapshotting
 * all (non-written) buffers in the submit, rather than just cmdstream bo's.
 * This is useful to capture the contents of (for example) vbo's or textures,
 * or shader programs (if not emitted inline in the cmdstream).
 */
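
/* Example capture session (a sketch only; the module-parameter path assumes
 * the driver is built as the "msm" module and that debugfs is mounted at
 * /sys/kernel/debug -- adjust paths as needed):
 *
 *   echo Y > /sys/module/msm/parameters/rd_full   # optional: full snapshots
 *   cat /sys/kernel/debug/dri/0/rd > logfile.rd &
 *   ... reproduce the GPU hang, then inspect logfile.rd with cffdump ...
 */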

#ifdef CONFIG_DEBUG_FS

#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/wait.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

static bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
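
/* Section types in the generated .rd stream.  The numeric values (and their
 * order) presumably need to stay in sync with the userspace freedreno/cffdump
 * parser, so new types should only be appended at the end.
 */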
enum rd_sect_type {
	RD_NONE,
	RD_TEST,           /* ascii text */
	RD_CMD,            /* ascii text */
	RD_GPUADDR,        /* u32 gpuaddr, u32 size */
	RD_CONTEXT,        /* raw dump */
	RD_CMDSTREAM,      /* raw dump */
	RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
	RD_PARAM,          /* u32 param_type, u32 param_val, u32 bitlen */
	RD_FLUSH,          /* empty, clear previous params */
	RD_PROGRAM,        /* shader program, raw dump */
	RD_VERT_SHADER,
	RD_FRAG_SHADER,
	RD_BUFFER_CONTENTS,
	RD_GPU_ID,
};

#define BUF_SZ 512  /* should be power of 2 */

/* space used: */
#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))

/* space available: */
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))

struct msm_rd_state {
	struct drm_device *dev;

	bool open;

	/* current submit to read out: */
	struct msm_gem_submit *submit;

	/* fifo access is synchronized on the producer side by
	 * struct_mutex held by submit code (otherwise we could
	 * end up w/ cmds logged in a different order than they
	 * were executed), and read_lock synchronizes the reads.
	 */
	struct mutex read_lock;

	wait_queue_head_t fifo_event;
	struct circ_buf fifo;

	char buf[BUF_SZ];
};
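
/* Producer side: append sz bytes to the fifo, blocking (uninterruptibly)
 * until the reader has drained enough space.  Called from the submit path
 * with struct_mutex held, so writes from different submits cannot
 * interleave.
 */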
static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
	struct circ_buf *fifo = &rd->fifo;
	const char *ptr = buf;

	while (sz > 0) {
		char *fptr = &fifo->buf[fifo->head];
		int n;

		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);

		n = min(sz, circ_space_to_end(&rd->fifo));
		memcpy(fptr, ptr, n);

		fifo->head = (fifo->head + n) & (BUF_SZ - 1);
		sz  -= n;
		ptr += n;

		wake_up_all(&rd->fifo_event);
	}
}
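
/* Emit one section record into the stream: a u32 section type, a u32
 * payload size, then the raw payload bytes (all in native endianness,
 * which is what the cffdump parser appears to expect).
 */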
static void rd_write_section(struct msm_rd_state *rd,
		enum rd_sect_type type, const void *buf, int sz)
{
	rd_write(rd, &type, 4);
	rd_write(rd, &sz, 4);
	rd_write(rd, buf, sz);
}
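
/* Consumer side: copy out whatever is currently in the fifo (up to sz
 * bytes), waiting interruptibly for data if the fifo is empty.  Reads are
 * serialized against each other by read_lock; the wake_up after advancing
 * the tail unblocks any writer stalled in rd_write().
 */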
static ssize_t rd_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_rd_state *rd = file->private_data;
	struct circ_buf *fifo = &rd->fifo;
	const char *fptr = &fifo->buf[fifo->tail];
	int n = 0, ret = 0;

	mutex_lock(&rd->read_lock);

	ret = wait_event_interruptible(rd->fifo_event,
			circ_count(&rd->fifo) > 0);
	if (ret)
		goto out;

	n = min_t(int, sz, circ_count_to_end(&rd->fifo));
	if (copy_to_user(buf, fptr, n)) {
		ret = -EFAULT;
		goto out;
	}

	fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
	*ppos += n;

	wake_up_all(&rd->fifo_event);

out:
	mutex_unlock(&rd->read_lock);
	if (ret)
		return ret;
	return n;
}
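
/* Only a single reader is allowed at a time (and only once a GPU has been
 * probed).  On open, write an RD_GPU_ID section first so the parsing tools
 * know which register database to load before any cmdstream sections
 * follow.
 */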
static int rd_open(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;
	struct drm_device *dev = rd->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	uint64_t val;
	uint32_t gpu_id;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (rd->open || !gpu) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = rd;
	rd->open = true;

	/* the parsing tools need to know gpu-id to know which
	 * register database to load.
	 */
	gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
	gpu_id = val;

	rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int rd_release(struct inode *inode, struct file *file)
{
	struct msm_rd_state *rd = inode->i_private;
	rd->open = false;
	return 0;
}

static const struct file_operations rd_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rd_open,
	.read = rd_read,
	.llseek = no_llseek,
	.release = rd_release,
};
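
/* Create the "rd" debugfs node.  priv->rd is device-wide state, so it is
 * only allocated for the first minor that gets here; later minors are
 * no-ops.
 */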
int msm_rd_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_rd_state *rd;
	struct dentry *ent;

	/* only create on first minor: */
	if (priv->rd)
		return 0;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->dev = minor->dev;
	rd->fifo.buf = rd->buf;

	mutex_init(&rd->read_lock);
	priv->rd = rd;

	init_waitqueue_head(&rd->fifo_event);

	ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
			minor->debugfs_root, rd, &rd_debugfs_fops);
	if (!ent) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
				minor->debugfs_root);
		goto fail;
	}

	return 0;

fail:
	msm_rd_debugfs_cleanup(priv);
	return -1;
}
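
/* Free the rd state allocated in msm_rd_debugfs_init().  The debugfs node
 * itself is not removed here; presumably it is torn down along with the
 * rest of the minor's debugfs tree by the DRM core.
 */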
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
	struct msm_rd_state *rd = priv->rd;

	if (!rd)
		return;

	priv->rd = NULL;
	mutex_destroy(&rd->read_lock);
	kfree(rd);
}
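
/* Write an RD_GPUADDR section (iova low 32 bits, size, iova high 32 bits)
 * followed by an RD_BUFFER_CONTENTS section for one bo in the submit.  If
 * iova is zero, snapshot the whole bo instead of the requested sub-range.
 */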
static void snapshot_buf(struct msm_rd_state *rd,
		struct msm_gem_submit *submit, int idx,
		uint64_t iova, uint32_t size)
{
	struct msm_gem_object *obj = submit->bos[idx].obj;
	const char *buf;

	buf = msm_gem_get_vaddr(&obj->base);
	if (IS_ERR(buf))
		return;

	if (iova) {
		buf += iova - submit->bos[idx].iova;
	} else {
		iova = submit->bos[idx].iova;
		size = obj->base.size;
	}

	rd_write_section(rd, RD_GPUADDR,
			(uint32_t[3]){ iova, size, iova >> 32 }, 12);
	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);

	msm_gem_put_vaddr(&obj->base);
}
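
/* Log one submit: an RD_CMD header identifying the process and fence,
 * buffer snapshots (all non-written bo's with rd_full, otherwise just the
 * cmdstream bo's), and an RD_CMDSTREAM_ADDR section per cmd buffer so the
 * parser knows where to start decoding.
 */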
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
{
	struct drm_device *dev = submit->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_rd_state *rd = priv->rd;
	char msg[128];
	int i, n;

	if (!rd->open)
		return;

	/* writing into fifo is serialized by caller, and
	 * rd->read_lock is used to serialize the reads
	 */
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
			TASK_COMM_LEN, current->comm, task_pid_nr(current),
			submit->fence->seqno);

	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));

	if (rd_full) {
		for (i = 0; i < submit->nr_bos; i++) {
			/* buffers that are written to probably don't start out
			 * with anything interesting:
			 */
			if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
				continue;

			snapshot_buf(rd, submit, i, 0, 0);
		}
	}

	for (i = 0; i < submit->nr_cmds; i++) {
		uint64_t iova = submit->cmd[i].iova;
		uint32_t szd  = submit->cmd[i].size; /* in dwords */

		/* snapshot cmdstream bo's (if we haven't already): */
		if (!rd_full) {
			snapshot_buf(rd, submit, submit->cmd[i].idx,
					submit->cmd[i].iova, szd * 4);
		}

		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets, we've logged the buffer, the
			 * parser tool will follow the IB based on the logged
			 * buffer/gpuaddr, so nothing more to do.
			 */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
		case MSM_SUBMIT_CMD_BUF:
			rd_write_section(rd, RD_CMDSTREAM_ADDR,
					(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
			break;
		}
	}
}

#endif