/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

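	/* Enable medium-grain clock gating and bring up the UVD clocks
	 * before starting the block.
	 */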
	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

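	/* Each PACKET0(reg, 0) below is followed by the value to write, so
	 * these pairs program the semaphore timeout registers through the
	 * ring itself rather than via direct MMIO.
	 */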
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1 << 2, ~(1 << 2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

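	/* Boot the VCPU: poll UVD_STATUS for the ready bit, and if it does
	 * not come up within roughly a second, toggle the VCPU soft reset
	 * and retry, up to 10 times.
	 */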
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1 << 2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

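	/* Poll UVD_STATUS until bit 1 is set, as in uvd_v4_2_start, before
	 * shutting the block down.
	 */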
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

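	/* Then wait for any of the low four UVD_LMI_STATUS bits, which
	 * presumably signal that outstanding LMI transactions have drained.
	 */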
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

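	/* After the stall, wait for bits 6 and 9 (the 0x240 mask) of
	 * UVD_LMI_STATUS, presumably the UMC/master idle flags.
	 */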
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence address
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

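	/* seq goes to UVD_CONTEXT_ID, the fence address to DATA0/DATA1 with
	 * command 0 (the fence write); command 2 then emits the trap.
	 */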
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

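	/* The VCPU sees three windows behind UVD_VCPU_CACHE_OFFSET0/1/2:
	 * the firmware image, the heap, and the stack plus per-session
	 * context, all in 8-byte units (hence the >> 3 shifts).
	 */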
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

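	/* The low 12 bits of ixUVD_CGC_MEM_CTRL appear to gate the memory
	 * clocks; they are set together with DYN_CLOCK_MODE for MGCG.
	 */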
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

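	/* In sw_mode, clear the override bits (0x7ffff800) and enable
	 * dynamic clock ramping via UVD_CGC_CTRL2; otherwise force the
	 * override bits on and leave the ramp logic disabled.
	 */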
	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);

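		/* With dpm disabled, drive the UVD power-gating state
		 * machine directly: power the block down here and back up
		 * again on ungate.
		 */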
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};