/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct amdgpu_bo *bo,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}
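
/*
 * Informational sketch (not part of the driver): adev->vce.fw_version packs
 * the three values decoded above into one word, so a consumer given only the
 * packed value could unpack it again like this (assuming each field fits in
 * eight bits; variable names hypothetical):
 *
 *	unsigned major  = (fw_version >> 24) & 0xff;
 *	unsigned minor  = (fw_version >> 16) & 0xff;
 *	unsigned binary = (fw_version >>  8) & 0xff;
 *
 * For example, firmware 52.4 with binary ID 3 is stored as 0x34040300. The
 * create-message helper below relies on this layout when it checks
 * (fw_version >> 24) >= 52 to pick the longer create packet.
 */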

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
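
/*
 * Usage sketch (informational): begin_use/end_use bracket every submission so
 * the delayed work can gate VCE again once the engine falls idle. A
 * hypothetical caller pushing a raw command would pair them like this:
 *
 *	amdgpu_vce_ring_begin_use(ring);	// cancel idle work, ungate
 *	r = amdgpu_ring_alloc(ring, 16);
 *	if (!r) {
 *		amdgpu_ring_write(ring, VCE_CMD_END);
 *		amdgpu_ring_commit(ring);
 *	}
 *	amdgpu_vce_ring_end_use(ring);		// re-arm the 1s idle timer
 *
 * In practice the ring framework calls these through the begin_use/end_use
 * hooks of the ring's funcs table rather than directly.
 */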

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @bo: buffer object to use as the feedback buffer
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct amdgpu_bo *bo,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	addr = amdgpu_bo_gpu_offset(bo);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
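
/*
 * Note on the packet layout above (informational): every VCE command packet
 * starts with a length dword (packet size in bytes, including the header)
 * followed by a command dword. The session packet, for instance, is
 *
 *	0x0000000c	len: 12 bytes == 3 dwords
 *	0x00000001	cmd: session
 *	handle		the 32-bit session handle
 *
 * which is why the parsers below can walk an IB with idx += len / 4 and
 * reject any len that is < 8 or not dword aligned.
 */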

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}
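
/*
 * Informational sketch: the command stream carries GPU addresses as two
 * separate dwords, so patching one means rewriting both halves. For a
 * hypothetical address:
 *
 *	uint64_t addr = 0x0000123456789000ULL;
 *	// lower_32_bits(addr) == 0x56789000, written back at dword 'lo'
 *	// upper_32_bits(addr) == 0x00001234, written back at dword 'hi'
 *
 * The size * index offset is added to find the mapping but subtracted again
 * before the write-back, so the patched value still points at the start of
 * the per-index array, just as the stream originally encoded it.
 */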

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;
			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
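
/*
 * Worked example of the bookkeeping above (informational): suppose an IB
 * creates a new handle A (slot 0) and destroys a pre-existing handle B
 * (slot 1). After the second pass: allocated == 0x1, created == 0x1,
 * destroyed == 0x2. On success tmp = destroyed frees slot 1 only; on a
 * parse error tmp = allocated rolls back slot 0 and leaves B untouched.
 */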

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;
			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the sequence number to
 * @seq: sequence number of the fence
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
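
/*
 * Informational sketch: for addr 0x1000 and seq 42 the six dwords emitted
 * above are
 *
 *	VCE_CMD_FENCE, 0x00001000, 0x00000000, 0x0000002a,
 *	VCE_CMD_TRAP, VCE_CMD_END
 *
 * i.e. the engine writes the (32-bit) sequence number to the given address
 * and then signals a trap; the WARN_ON above documents that VCE has no
 * 64-bit fence variant.
 */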

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}