/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
2012-10-02 18:01:07 +01:00
# include <drm/drmP.h>
2009-06-05 14:42:42 +02:00
# include "radeon.h"
2012-05-02 15:11:12 +02:00
2009-06-05 14:42:42 +02:00
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
2012-08-31 13:43:50 -04:00
static int radeon_debugfs_ring_init ( struct radeon_device * rdev , struct radeon_ring * ring ) ;
2012-05-09 15:34:58 +02:00
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
*
* Check if a specific ring supports writing to scratch registers ( all asics ) .
* Returns true if the ring supports writing to scratch regs , false if not .
*/
2012-07-17 14:02:31 -04:00
bool radeon_ring_supports_scratch_reg ( struct radeon_device * rdev ,
struct radeon_ring * ring )
{
switch ( ring - > idx ) {
case RADEON_RING_TYPE_GFX_INDEX :
case CAYMAN_RING_TYPE_CP1_INDEX :
case CAYMAN_RING_TYPE_CP2_INDEX :
return true ;
default :
return false ;
}
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_free_size - update the free size
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
*
* Update the free dw slots in the ring buffer ( all asics ) .
*/
2011-10-23 12:56:27 +02:00
void radeon_ring_free_size ( struct radeon_device * rdev , struct radeon_ring * ring )
2009-06-05 14:42:42 +02:00
{
2014-02-18 14:52:33 +01:00
uint32_t rptr = radeon_ring_get_rptr ( rdev , ring ) ;
2009-06-05 14:42:42 +02:00
/* This works because ring_size is a power of 2 */
2014-02-18 14:52:33 +01:00
ring - > ring_free_dw = rptr + ( ring - > ring_size / 4 ) ;
2011-10-23 12:56:27 +02:00
ring - > ring_free_dw - = ring - > wptr ;
ring - > ring_free_dw & = ring - > ptr_mask ;
if ( ! ring - > ring_free_dw ) {
2014-02-18 15:03:22 +01:00
/* this is an empty ring */
2011-10-23 12:56:27 +02:00
ring - > ring_free_dw = ring - > ring_size / 4 ;
2014-02-18 15:03:22 +01:00
/* update lockup info to avoid false positive */
radeon_ring_lockup_update ( rdev , ring ) ;
2009-06-05 14:42:42 +02:00
}
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_alloc - allocate space on the ring buffer
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
* @ ndw : number of dwords to allocate in the ring buffer
*
* Allocate @ ndw dwords in the ring buffer ( all asics ) .
* Returns 0 on success , error on failure .
*/
2011-10-23 12:56:27 +02:00
int radeon_ring_alloc ( struct radeon_device * rdev , struct radeon_ring * ring , unsigned ndw )
2009-06-05 14:42:42 +02:00
{
int r ;
2013-01-30 14:24:09 -05:00
/* make sure we aren't trying to allocate more space than there is on the ring */
if ( ndw > ( ring - > ring_size / 4 ) )
return - ENOMEM ;
2009-06-05 14:42:42 +02:00
/* Align requested size with padding so unlock_commit can
* pad safely */
2013-06-19 10:02:28 -04:00
radeon_ring_free_size ( rdev , ring ) ;
2011-10-23 12:56:27 +02:00
ndw = ( ndw + ring - > align_mask ) & ~ ring - > align_mask ;
while ( ndw > ( ring - > ring_free_dw - 1 ) ) {
radeon_ring_free_size ( rdev , ring ) ;
if ( ndw < ring - > ring_free_dw ) {
2009-06-05 14:42:42 +02:00
break ;
}
2014-02-18 15:58:31 +01:00
r = radeon_fence_wait_next ( rdev , ring - > idx ) ;
2010-04-30 15:24:17 -04:00
if ( r )
2009-06-05 14:42:42 +02:00
return r ;
}
2011-10-23 12:56:27 +02:00
ring - > count_dw = ndw ;
ring - > wptr_old = ring - > wptr ;
2009-06-05 14:42:42 +02:00
return 0 ;
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_lock - lock the ring and allocate space on it
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
* @ ndw : number of dwords to allocate in the ring buffer
*
* Lock the ring and allocate @ ndw dwords in the ring buffer
* ( all asics ) .
* Returns 0 on success , error on failure .
*/
2011-10-23 12:56:27 +02:00
int radeon_ring_lock ( struct radeon_device * rdev , struct radeon_ring * ring , unsigned ndw )
2010-04-30 15:24:17 -04:00
{
int r ;
2012-05-09 15:34:45 +02:00
mutex_lock ( & rdev - > ring_lock ) ;
2011-10-23 12:56:27 +02:00
r = radeon_ring_alloc ( rdev , ring , ndw ) ;
2010-04-30 15:24:17 -04:00
if ( r ) {
2012-05-09 15:34:45 +02:00
mutex_unlock ( & rdev - > ring_lock ) ;
2010-04-30 15:24:17 -04:00
return r ;
}
return 0 ;
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
2014-08-18 17:34:55 +09:00
* @ hdp_flush : Whether or not to perform an HDP cache flush
2012-07-17 14:02:38 -04:00
*
* Update the wptr ( write pointer ) to tell the GPU to
* execute new commands on the ring buffer ( all asics ) .
*/
2014-08-18 17:34:55 +09:00
void radeon_ring_commit ( struct radeon_device * rdev , struct radeon_ring * ring ,
bool hdp_flush )
2009-06-05 14:42:42 +02:00
{
2014-07-31 18:43:49 +09:00
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding .
*/
2014-08-18 17:34:55 +09:00
if ( hdp_flush & & rdev - > asic - > ring [ ring - > idx ] - > hdp_flush )
2014-07-31 18:43:49 +09:00
rdev - > asic - > ring [ ring - > idx ] - > hdp_flush ( rdev , ring ) ;
2009-06-05 14:42:42 +02:00
/* We pad to match fetch size */
2012-07-07 12:11:32 +02:00
while ( ring - > wptr & ring - > align_mask ) {
2011-11-17 14:25:56 -05:00
radeon_ring_write ( ring , ring - > nop ) ;
2009-06-05 14:42:42 +02:00
}
2013-12-11 11:34:45 +01:00
mb ( ) ;
2014-07-31 18:43:49 +09:00
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished .
*/
2014-08-18 17:34:55 +09:00
if ( hdp_flush & & rdev - > asic - > mmio_hdp_flush )
2014-07-31 18:43:49 +09:00
rdev - > asic - > mmio_hdp_flush ( rdev ) ;
2013-01-29 14:10:56 -05:00
radeon_ring_set_wptr ( rdev , ring ) ;
2010-04-30 15:24:17 -04:00
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_unlock_commit - tell the GPU to execute the new
* commands on the ring buffer and unlock it
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
2014-08-18 17:34:55 +09:00
* @ hdp_flush : Whether or not to perform an HDP cache flush
2012-07-17 14:02:38 -04:00
*
* Call radeon_ring_commit ( ) then unlock the ring ( all asics ) .
*/
2014-08-18 17:34:55 +09:00
void radeon_ring_unlock_commit ( struct radeon_device * rdev , struct radeon_ring * ring ,
bool hdp_flush )
2010-04-30 15:24:17 -04:00
{
2014-08-18 17:34:55 +09:00
radeon_ring_commit ( rdev , ring , hdp_flush ) ;
2012-05-09 15:34:45 +02:00
mutex_unlock ( & rdev - > ring_lock ) ;
2009-06-05 14:42:42 +02:00
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_undo - reset the wptr
*
* @ ring : radeon_ring structure holding ring information
*
2012-11-20 22:31:06 +01:00
* Reset the driver ' s copy of the wptr ( all asics ) .
2012-07-17 14:02:38 -04:00
*/
2012-05-09 15:34:45 +02:00
void radeon_ring_undo ( struct radeon_ring * ring )
2009-06-05 14:42:42 +02:00
{
2011-10-23 12:56:27 +02:00
ring - > wptr = ring - > wptr_old ;
2012-05-09 15:34:45 +02:00
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_unlock_undo - reset the wptr and unlock the ring
*
* @ ring : radeon_ring structure holding ring information
*
* Call radeon_ring_undo ( ) then unlock the ring ( all asics ) .
*/
2012-05-09 15:34:45 +02:00
void radeon_ring_unlock_undo ( struct radeon_device * rdev , struct radeon_ring * ring )
{
radeon_ring_undo ( ring ) ;
mutex_unlock ( & rdev - > ring_lock ) ;
2009-06-05 14:42:42 +02:00
}
2012-07-17 14:02:38 -04:00
/**
2012-11-20 22:31:06 +01:00
* radeon_ring_lockup_update - update lockup variables
2012-07-17 14:02:38 -04:00
*
* @ ring : radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp ( all asics ) .
*/
2014-02-18 14:52:33 +01:00
void radeon_ring_lockup_update ( struct radeon_device * rdev ,
struct radeon_ring * ring )
2012-05-02 15:11:20 +02:00
{
2014-02-18 15:24:06 +01:00
atomic_set ( & ring - > last_rptr , radeon_ring_get_rptr ( rdev , ring ) ) ;
atomic64_set ( & ring - > last_activity , jiffies_64 ) ;
2012-05-02 15:11:20 +02:00
}
/**
* radeon_ring_test_lockup ( ) - check if ring is lockedup by recording information
* @ rdev : radeon device structure
* @ ring : radeon_ring structure holding ring information
*
2014-02-18 12:37:50 +01:00
*/
2012-05-02 15:11:20 +02:00
bool radeon_ring_test_lockup ( struct radeon_device * rdev , struct radeon_ring * ring )
{
2014-02-18 14:52:33 +01:00
uint32_t rptr = radeon_ring_get_rptr ( rdev , ring ) ;
2014-02-18 15:24:06 +01:00
uint64_t last = atomic64_read ( & ring - > last_activity ) ;
uint64_t elapsed ;
2012-05-02 15:11:20 +02:00
2014-02-18 15:24:06 +01:00
if ( rptr ! = atomic_read ( & ring - > last_rptr ) ) {
/* ring is still working, no lockup */
2014-02-18 14:52:33 +01:00
radeon_ring_lockup_update ( rdev , ring ) ;
2012-05-02 15:11:20 +02:00
return false ;
}
2014-02-18 15:24:06 +01:00
elapsed = jiffies_to_msecs ( jiffies_64 - last ) ;
2012-05-02 15:11:21 +02:00
if ( radeon_lockup_timeout & & elapsed > = radeon_lockup_timeout ) {
2014-02-18 15:24:06 +01:00
dev_err ( rdev - > dev , " ring %d stalled for more than %llumsec \n " ,
ring - > idx , elapsed ) ;
2012-05-02 15:11:20 +02:00
return true ;
}
/* give a chance to the GPU ... */
return false ;
}
2012-07-09 11:52:44 +02:00
/**
* radeon_ring_backup - Back up the content of a ring
*
* @ rdev : radeon_device pointer
* @ ring : the ring we want to back up
*
* Saves all unprocessed commits from a ring , returns the number of dwords saved .
*/
unsigned radeon_ring_backup ( struct radeon_device * rdev , struct radeon_ring * ring ,
uint32_t * * data )
{
unsigned size , ptr , i ;
/* just in case lock the ring */
mutex_lock ( & rdev - > ring_lock ) ;
* data = NULL ;
2012-07-17 14:02:31 -04:00
if ( ring - > ring_obj = = NULL ) {
2012-07-09 11:52:44 +02:00
mutex_unlock ( & rdev - > ring_lock ) ;
return 0 ;
}
/* it doesn't make sense to save anything if all fences are signaled */
2012-07-17 14:02:30 -04:00
if ( ! radeon_fence_count_emitted ( rdev , ring - > idx ) ) {
2012-07-09 11:52:44 +02:00
mutex_unlock ( & rdev - > ring_lock ) ;
return 0 ;
}
/* calculate the number of dw on the ring */
2012-07-17 14:02:31 -04:00
if ( ring - > rptr_save_reg )
ptr = RREG32 ( ring - > rptr_save_reg ) ;
else if ( rdev - > wb . enabled )
ptr = le32_to_cpu ( * ring - > next_rptr_cpu_addr ) ;
else {
/* no way to read back the next rptr */
mutex_unlock ( & rdev - > ring_lock ) ;
return 0 ;
}
2012-07-09 11:52:44 +02:00
size = ring - > wptr + ( ring - > ring_size / 4 ) ;
size - = ptr ;
size & = ring - > ptr_mask ;
if ( size = = 0 ) {
mutex_unlock ( & rdev - > ring_lock ) ;
return 0 ;
}
/* and then save the content of the ring */
2014-10-20 18:40:54 +09:00
* data = drm_malloc_ab ( size , sizeof ( uint32_t ) ) ;
2012-07-20 14:17:00 +03:00
if ( ! * data ) {
mutex_unlock ( & rdev - > ring_lock ) ;
return 0 ;
}
2012-07-09 11:52:44 +02:00
for ( i = 0 ; i < size ; + + i ) {
( * data ) [ i ] = ring - > ring [ ptr + + ] ;
ptr & = ring - > ptr_mask ;
}
mutex_unlock ( & rdev - > ring_lock ) ;
return size ;
}
/**
* radeon_ring_restore - append saved commands to the ring again
*
* @ rdev : radeon_device pointer
* @ ring : ring to append commands to
* @ size : number of dwords we want to write
* @ data : saved commands
*
* Allocates space on the ring and restore the previously saved commands .
*/
int radeon_ring_restore ( struct radeon_device * rdev , struct radeon_ring * ring ,
unsigned size , uint32_t * data )
{
int i , r ;
if ( ! size | | ! data )
return 0 ;
/* restore the saved ring content */
r = radeon_ring_lock ( rdev , ring , size ) ;
if ( r )
return r ;
for ( i = 0 ; i < size ; + + i ) {
radeon_ring_write ( ring , data [ i ] ) ;
}
2014-08-18 17:34:55 +09:00
radeon_ring_unlock_commit ( rdev , ring , false ) ;
2014-10-20 18:40:54 +09:00
drm_free_large ( data ) ;
2012-07-09 11:52:44 +02:00
return 0 ;
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_init - init driver ring struct .
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
* @ ring_size : size of the ring
* @ rptr_offs : offset of the rptr writeback location in the WB buffer
* @ nop : nop packet for this ring
*
* Initialize the driver information for the selected ring ( all asics ) .
* Returns 0 on success , error on failure .
*/
2011-10-23 12:56:27 +02:00
int radeon_ring_init ( struct radeon_device * rdev , struct radeon_ring * ring , unsigned ring_size ,
2013-12-09 19:44:30 -05:00
unsigned rptr_offs , u32 nop )
2009-06-05 14:42:42 +02:00
{
int r ;
2011-10-23 12:56:27 +02:00
ring - > ring_size = ring_size ;
ring - > rptr_offs = rptr_offs ;
2011-11-17 14:25:56 -05:00
ring - > nop = nop ;
2009-06-05 14:42:42 +02:00
/* Allocate ring buffer */
2011-10-23 12:56:27 +02:00
if ( ring - > ring_obj = = NULL ) {
r = radeon_bo_create ( rdev , ring - > ring_size , PAGE_SIZE , true ,
2014-09-18 14:11:56 +02:00
RADEON_GEM_DOMAIN_GTT , 0 , NULL ,
2012-05-10 18:33:13 -04:00
NULL , & ring - > ring_obj ) ;
2009-06-05 14:42:42 +02:00
if ( r ) {
2009-11-20 14:29:23 +01:00
dev_err ( rdev - > dev , " (%d) ring create failed \n " , r ) ;
2009-06-05 14:42:42 +02:00
return r ;
}
2011-10-23 12:56:27 +02:00
r = radeon_bo_reserve ( ring - > ring_obj , false ) ;
2009-11-20 14:29:23 +01:00
if ( unlikely ( r ! = 0 ) )
return r ;
2011-10-23 12:56:27 +02:00
r = radeon_bo_pin ( ring - > ring_obj , RADEON_GEM_DOMAIN_GTT ,
& ring - > gpu_addr ) ;
2009-06-05 14:42:42 +02:00
if ( r ) {
2011-10-23 12:56:27 +02:00
radeon_bo_unreserve ( ring - > ring_obj ) ;
2009-11-20 14:29:23 +01:00
dev_err ( rdev - > dev , " (%d) ring pin failed \n " , r ) ;
2009-06-05 14:42:42 +02:00
return r ;
}
2011-10-23 12:56:27 +02:00
r = radeon_bo_kmap ( ring - > ring_obj ,
( void * * ) & ring - > ring ) ;
radeon_bo_unreserve ( ring - > ring_obj ) ;
2009-06-05 14:42:42 +02:00
if ( r ) {
2009-11-20 14:29:23 +01:00
dev_err ( rdev - > dev , " (%d) ring map failed \n " , r ) ;
2009-06-05 14:42:42 +02:00
return r ;
}
}
2011-10-23 12:56:27 +02:00
ring - > ptr_mask = ( ring - > ring_size / 4 ) - 1 ;
ring - > ring_free_dw = ring - > ring_size / 4 ;
2012-07-17 14:02:31 -04:00
if ( rdev - > wb . enabled ) {
u32 index = RADEON_WB_RING0_NEXT_RPTR + ( ring - > idx * 4 ) ;
ring - > next_rptr_gpu_addr = rdev - > wb . gpu_addr + index ;
ring - > next_rptr_cpu_addr = & rdev - > wb . wb [ index / 4 ] ;
}
2012-05-02 15:11:11 +02:00
if ( radeon_debugfs_ring_init ( rdev , ring ) ) {
DRM_ERROR ( " Failed to register debugfs file for rings ! \n " ) ;
}
2014-02-18 14:52:33 +01:00
radeon_ring_lockup_update ( rdev , ring ) ;
2009-06-05 14:42:42 +02:00
return 0 ;
}
2012-07-17 14:02:38 -04:00
/**
* radeon_ring_fini - tear down the driver ring struct .
*
* @ rdev : radeon_device pointer
* @ ring : radeon_ring structure holding ring information
*
* Tear down the driver information for the selected ring ( all asics ) .
*/
2011-10-23 12:56:27 +02:00
void radeon_ring_fini ( struct radeon_device * rdev , struct radeon_ring * ring )
2009-06-05 14:42:42 +02:00
{
2009-11-20 14:29:23 +01:00
int r ;
2010-05-06 11:02:24 -04:00
struct radeon_bo * ring_obj ;
2009-11-20 14:29:23 +01:00
2012-05-09 15:34:45 +02:00
mutex_lock ( & rdev - > ring_lock ) ;
2011-10-23 12:56:27 +02:00
ring_obj = ring - > ring_obj ;
2012-05-09 15:34:45 +02:00
ring - > ready = false ;
2011-10-23 12:56:27 +02:00
ring - > ring = NULL ;
ring - > ring_obj = NULL ;
2012-05-09 15:34:45 +02:00
mutex_unlock ( & rdev - > ring_lock ) ;
2010-05-06 11:02:24 -04:00
if ( ring_obj ) {
r = radeon_bo_reserve ( ring_obj , false ) ;
2009-11-20 14:29:23 +01:00
if ( likely ( r = = 0 ) ) {
2010-05-06 11:02:24 -04:00
radeon_bo_kunmap ( ring_obj ) ;
radeon_bo_unpin ( ring_obj ) ;
radeon_bo_unreserve ( ring_obj ) ;
2009-11-20 14:29:23 +01:00
}
2010-05-06 11:02:24 -04:00
radeon_bo_unref ( & ring_obj ) ;
2009-06-05 14:42:42 +02:00
}
}
/*
* Debugfs info
*/
# if defined(CONFIG_DEBUG_FS)
2011-10-24 17:08:44 +02:00
static int radeon_debugfs_ring_info ( struct seq_file * m , void * data )
{
struct drm_info_node * node = ( struct drm_info_node * ) m - > private ;
struct drm_device * dev = node - > minor - > dev ;
struct radeon_device * rdev = dev - > dev_private ;
int ridx = * ( int * ) node - > info_ent - > data ;
struct radeon_ring * ring = & rdev - > ring [ ridx ] ;
2013-12-12 09:42:37 +01:00
uint32_t rptr , wptr , rptr_next ;
2011-10-24 17:08:44 +02:00
unsigned count , i , j ;
radeon_ring_free_size ( rdev , ring ) ;
count = ( ring - > ring_size / 4 ) - ring - > ring_free_dw ;
2013-12-12 09:42:37 +01:00
wptr = radeon_ring_get_wptr ( rdev , ring ) ;
2013-12-09 19:44:30 -05:00
seq_printf ( m , " wptr: 0x%08x [%5d] \n " ,
wptr , wptr ) ;
2013-12-12 09:42:37 +01:00
rptr = radeon_ring_get_rptr ( rdev , ring ) ;
2013-12-09 19:44:30 -05:00
seq_printf ( m , " rptr: 0x%08x [%5d] \n " ,
rptr , rptr ) ;
2013-12-12 09:42:37 +01:00
2012-07-06 16:22:55 +02:00
if ( ring - > rptr_save_reg ) {
2013-12-12 09:42:37 +01:00
rptr_next = RREG32 ( ring - > rptr_save_reg ) ;
seq_printf ( m , " rptr next(0x%04x): 0x%08x [%5d] \n " ,
ring - > rptr_save_reg , rptr_next , rptr_next ) ;
} else
rptr_next = ~ 0 ;
seq_printf ( m , " driver's copy of the wptr: 0x%08x [%5d] \n " ,
ring - > wptr , ring - > wptr ) ;
seq_printf ( m , " last semaphore signal addr : 0x%016llx \n " ,
ring - > last_semaphore_signal_addr ) ;
seq_printf ( m , " last semaphore wait addr : 0x%016llx \n " ,
ring - > last_semaphore_wait_addr ) ;
2011-10-24 17:08:44 +02:00
seq_printf ( m , " %u free dwords in ring \n " , ring - > ring_free_dw ) ;
seq_printf ( m , " %u dwords in ring \n " , count ) ;
2013-12-12 09:42:37 +01:00
2015-03-23 11:32:59 +01:00
if ( ! ring - > ring )
2013-12-12 09:42:37 +01:00
return 0 ;
2013-01-02 17:30:34 -05:00
/* print 8 dw before current rptr as often it's the last executed
* packet that is the root issue
*/
2013-12-12 09:42:37 +01:00
i = ( rptr + ring - > ptr_mask + 1 - 32 ) & ring - > ptr_mask ;
for ( j = 0 ; j < = ( count + 32 ) ; j + + ) {
seq_printf ( m , " r[%5d]=0x%08x " , i , ring - > ring [ i ] ) ;
if ( rptr = = i )
seq_puts ( m , " * " ) ;
if ( rptr_next = = i )
seq_puts ( m , " # " ) ;
seq_puts ( m , " \n " ) ;
i = ( i + 1 ) & ring - > ptr_mask ;
2011-10-24 17:08:44 +02:00
}
return 0 ;
}
2013-04-08 12:41:29 +02:00
static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX ;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX ;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX ;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX ;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX ;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX ;
2013-05-23 12:10:04 +02:00
static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX ;
static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX ;
2011-10-24 17:08:44 +02:00
static struct drm_info_list radeon_debugfs_ring_info_list [ ] = {
2013-04-08 12:41:29 +02:00
{ " radeon_ring_gfx " , radeon_debugfs_ring_info , 0 , & radeon_gfx_index } ,
{ " radeon_ring_cp1 " , radeon_debugfs_ring_info , 0 , & cayman_cp1_index } ,
{ " radeon_ring_cp2 " , radeon_debugfs_ring_info , 0 , & cayman_cp2_index } ,
{ " radeon_ring_dma1 " , radeon_debugfs_ring_info , 0 , & radeon_dma1_index } ,
{ " radeon_ring_dma2 " , radeon_debugfs_ring_info , 0 , & radeon_dma2_index } ,
{ " radeon_ring_uvd " , radeon_debugfs_ring_info , 0 , & r600_uvd_index } ,
2013-05-23 12:10:04 +02:00
{ " radeon_ring_vce1 " , radeon_debugfs_ring_info , 0 , & si_vce1_index } ,
{ " radeon_ring_vce2 " , radeon_debugfs_ring_info , 0 , & si_vce2_index } ,
2011-10-24 17:08:44 +02:00
} ;
2009-06-05 14:42:42 +02:00
# endif
2012-08-31 13:43:50 -04:00
static int radeon_debugfs_ring_init ( struct radeon_device * rdev , struct radeon_ring * ring )
2011-10-24 17:08:44 +02:00
{
# if defined(CONFIG_DEBUG_FS)
2012-05-02 15:11:11 +02:00
unsigned i ;
for ( i = 0 ; i < ARRAY_SIZE ( radeon_debugfs_ring_info_list ) ; + + i ) {
struct drm_info_list * info = & radeon_debugfs_ring_info_list [ i ] ;
int ridx = * ( int * ) radeon_debugfs_ring_info_list [ i ] . data ;
unsigned r ;
if ( & rdev - > ring [ ridx ] ! = ring )
continue ;
r = radeon_debugfs_add_files ( rdev , info , 1 ) ;
if ( r )
return r ;
}
2011-10-24 17:08:44 +02:00
# endif
2012-05-02 15:11:11 +02:00
return 0 ;
2011-10-24 17:08:44 +02:00
}