/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
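
/*
 * Hypothetical usage sketch (not part of this file): lockup checks like the
 * one above are typically registered as per-ring callbacks in the asic
 * table and polled from the fence timeout path, roughly:
 *
 *	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 *
 *	if (si_dma_is_lockup(rdev, ring))
 *		radeon_gpu_reset(rdev);	// assumed recovery path
 */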
/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
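
/*
 * For reference, each COPY packet emitted above occupies five dwords:
 *
 *	dw0: DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, bytes)  header + byte count
 *	dw1: lower_32_bits(pe)                            dst address, low
 *	dw2: lower_32_bits(src)                           src address, low
 *	dw3: upper_32_bits(pe) & 0xff                     dst address, high
 *	dw4: upper_32_bits(src) & 0xff                    src address, high
 *
 * With 8 bytes per PTE and the 0xFFFF8-byte cap per packet, one packet
 * moves at most 0x1FFFF page table entries.
 */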
/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
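
/*
 * For reference, each WRITE packet emitted above is a three-dword header
 * followed by ndw payload dwords, two per PTE:
 *
 *	dw0:  DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw)  header + dword count
 *	dw1:  pe                                          dst address, low
 *	dw2:  upper_32_bits(pe) & 0xff                    dst address, high
 *	dw3+: (value low, value high) pairs               one 64-bit PTE each
 *
 * The 0xFFFFE cap on ndw means a single packet writes at most 0x7FFFF
 * entries.
 */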
/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;

		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}
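
/*
 * For reference, each PTE_PDE packet emitted above is nine dwords and lets
 * the engine generate the entries itself from a base value, mask and
 * increment, instead of carrying every PTE in the payload:
 *
 *	dw0:   DMA_PTE_PDE_PACKET(ndw)   header + dword count
 *	dw1-2: pe                        dst address (low, high)
 *	dw3-4: flags                     mask applied to each entry
 *	dw5-6: value                     starting PTE value
 *	dw7-8: incr                      per-entry address increment
 *
 * This is why count advances by ndw / 2: each entry accounts for two dwords
 * of generated page table data.
 */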
/**
 * si_dma_vm_flush - flush the TLB for a specific VM via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: vm context to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base for the requested VM, flush the HDP cache
 * and invalidate the TLB for that VM context (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
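
/*
 * For reference, the SRBM_WRITE sequences above follow a fixed three-dword
 * shape on this engine:
 *
 *	dw0: DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)  header
 *	dw1: (byte_enable << 16) | (reg_offset >> 2)        0xf = all bytes
 *	dw2: value                                          data to write
 *
 * so each register write through the DMA ring costs three dwords.
 */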
/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
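
/*
 * Hypothetical usage sketch (not part of this file): when registered as the
 * asic copy callback, a buffer move boils down to something like:
 *
 *	struct radeon_fence *fence;
 *
 *	fence = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
 *			    num_pages, resv);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	r = radeon_fence_wait(fence, false);	// assumed: caller waits
 *	radeon_fence_unref(&fence);
 *
 * The "num_loops * 5 + 11" ring reservation above covers five dwords per
 * COPY packet plus headroom for the sync and fence packets.
 */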