/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
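	/*
	 * SRBM_WRITE lets the DMA engine write an MMIO register from the
	 * ring: the next dword carries the register dword offset in its low
	 * bits, the dword after that the value.  Writing 1 to
	 * HDP_MEM_COHERENCY_FLUSH_CNTL flushes the HDP cache so the fence
	 * value written above becomes visible to the CPU.
	 */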
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
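		/*
		 * Write back the rptr value the CPU should see once this
		 * submission is consumed: 4 dwords for this WRITE packet,
		 * NOP padding up to (wptr & 7) == 5, plus 3 dwords for the
		 * INDIRECT_BUFFER packet emitted below.
		 */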
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
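	/*
	 * 3-dword IB packet: the base address is 32-byte aligned (low bits
	 * masked off) and the last dword packs the IB length in dwords above
	 * the upper address bits.
	 */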
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object with embedded fence
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
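	/*
	 * Each DMA_PACKET_COPY below is 5 dwords and moves at most 0xfffff
	 * dwords, so reserve 5 dwords per loop plus headroom for the sync
	 * and fence packets emitted around the copy.
	 */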
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}
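
	/*
	 * Pick up the fences attached to the reservation object and emit
	 * the waits needed so the copy does not start before earlier work
	 * on these buffers has finished.
	 */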
	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
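		/*
		 * Engine is not flagged for reset; refresh the lockup
		 * tracker so any future stall is measured from this point.
		 */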
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}