/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_NAVI10);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
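
/*
 * amdgpu_vcn_sw_init - load the VCN firmware and allocate the VCPU buffer
 *
 * Requests and validates the per-ASIC VCN microcode, logs its version and
 * reserves the VRAM buffer object that backs the VCPU (firmware image,
 * stack and context).
 */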
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}
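
/*
 * amdgpu_vcn_sw_fini - free the VCN software state
 *
 * Releases the saved-BO shadow, the VCPU buffer object, the decode, encode
 * and JPEG rings, and the firmware requested in amdgpu_vcn_sw_init().
 */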
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}
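
/*
 * amdgpu_vcn_suspend - save the VCPU buffer before suspend
 *
 * Cancels the idle work and copies the contents of the VCPU buffer object
 * into a kvmalloc'ed shadow so it can be restored on resume.
 */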
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
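
/*
 * amdgpu_vcn_resume - restore the VCPU buffer after resume
 *
 * Copies the shadow saved in amdgpu_vcn_suspend() back into the VCPU buffer
 * object or, if no shadow exists, reloads the firmware image (when it is not
 * loaded by the PSP) and clears the remainder of the buffer.
 */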
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}
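
/*
 * amdgpu_vcn_idle_work_handler - power-gate VCN when all rings are idle
 *
 * Counts the fences still emitted on the encode, decode and JPEG rings,
 * updates the DPG pause state accordingly and, once everything is idle,
 * gates the block (via DPM or the powergating IP interface); otherwise the
 * work is rescheduled.
 */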
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
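
/*
 * amdgpu_vcn_ring_begin_use - power up VCN before submitting to a ring
 *
 * Cancels any pending idle work (ungating the block if the work had already
 * run) and, with DPG support, re-evaluates the pause state so the ring that
 * is about to be used is marked busy.
 */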
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}
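
/*
 * amdgpu_vcn_ring_end_use - schedule the idle work after a submission
 *
 * Re-arms the delayed idle work so the block is power-gated again once it
 * has been idle for VCN_IDLE_TIMEOUT.
 */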
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
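
/*
 * amdgpu_vcn_dec_ring_test_ring - basic write test on the decode ring
 *
 * Writes a token to the SCRATCH9 register through the ring and polls the
 * register until the value reads back or the timeout expires.
 */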
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
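
/*
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * Builds a small direct-submit IB that points the decoder at the message in
 * @bo, fences the buffer and optionally returns the fence to the caller.
 */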
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
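
/*
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder "create" message
 */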
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
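
/*
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder "destroy" message
 */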
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
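
/*
 * amdgpu_vcn_dec_ring_test_ib - indirect buffer test on the decode ring
 *
 * Sends a create message followed by a destroy message for the same handle
 * and waits on the destroy fence with the given timeout.
 */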
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}
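
/*
 * amdgpu_vcn_enc_ring_test_ring - basic test on an encode ring
 *
 * Commits an END command and waits for the read pointer to advance past the
 * value sampled before the submission.
 */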
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
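
/*
 * amdgpu_vcn_enc_get_create_msg - open an encoder session via a direct IB
 */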
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
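
/*
 * amdgpu_vcn_enc_get_destroy_msg - close an encoder session via a direct IB
 */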
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
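
/*
 * amdgpu_vcn_enc_ring_test_ib - indirect buffer test on an encode ring
 *
 * Opens and closes an encoder session and waits on the resulting fence.
 */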
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}
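
/*
 * amdgpu_vcn_jpeg_ring_test_ring - basic write test on the JPEG ring
 *
 * Writes a token to the JPEG pitch register through the ring and polls until
 * the value reads back or the timeout expires.
 */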
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
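
/*
 * amdgpu_vcn_jpeg_set_reg - write the JPEG pitch register through a direct IB
 */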
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
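
/*
 * amdgpu_vcn_jpeg_ring_test_ib - indirect buffer test on the JPEG ring
 *
 * Writes a token to the pitch register via an IB, waits on the fence and
 * then polls the register until the value reads back or the timeout expires.
 */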
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.jpeg_pitch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}