/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__

#define AMDGPU_VCN_STACK_SIZE		(128*1024)
#define AMDGPU_VCN_CONTEXT_SIZE		(512*1024)

#define AMDGPU_VCN_FIRMWARE_OFFSET	256
#define AMDGPU_VCN_MAX_ENC_RINGS	3

#define AMDGPU_MAX_VCN_INSTANCES	2
#define AMDGPU_MAX_VCN_ENC_RINGS	(AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

#define AMDGPU_VCN_HARVEST_VCN0		(1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1		(1 << 1)

#define VCN_DEC_KMD_CMD			0x80000000

#define VCN_DEC_CMD_FENCE		0x00000000
#define VCN_DEC_CMD_TRAP		0x00000001
#define VCN_DEC_CMD_WRITE_REG		0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT	0x00000006
#define VCN_DEC_CMD_PACKET_START	0x0000000a
#define VCN_DEC_CMD_PACKET_END		0x0000000b
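
/*
 * Hedged usage sketch (not part of this header): a decode command is
 * typically emitted as data0/data1 register writes followed by the command
 * word; on KMD-driven rings the command word is OR'ed with VCN_DEC_KMD_CMD.
 * Register offsets come from the amdgpu_vcn_reg tables declared below.
 * Details vary per VCN generation; roughly:
 *
 *	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
 *	amdgpu_ring_write(ring, lower_32_bits(addr));
 *	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
 *	amdgpu_ring_write(ring, upper_32_bits(addr));
 *	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
 *	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
 */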

#define VCN_ENC_CMD_NO_OP		0x00000000
#define VCN_ENC_CMD_END			0x00000001
#define VCN_ENC_CMD_IB			0x00000002
#define VCN_ENC_CMD_FENCE		0x00000003
#define VCN_ENC_CMD_TRAP		0x00000004
#define VCN_ENC_CMD_REG_WRITE		0x0000000b
#define VCN_ENC_CMD_REG_WAIT		0x0000000c
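
/*
 * Hedged sketch: encode-ring commands are plain dwords, the command word
 * followed by its operands; a fence plus trap might look roughly like:
 *
 *	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
 *	amdgpu_ring_write(ring, lower_32_bits(addr));
 *	amdgpu_ring_write(ring, upper_32_bits(addr));
 *	amdgpu_ring_write(ring, seq);
 *	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
 */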

#define VCN_VID_SOC_ADDRESS_2_0		0x1fa00
#define VCN_AON_SOC_ADDRESS_2_0		0x1f800
#define VCN_VID_IP_ADDRESS_2_0		0x0
#define VCN_AON_IP_ADDRESS_2_0		0x30000

#define mmUVD_RBC_XX_IB_REG_CHECK		0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX	1
#define mmUVD_REG_XX_MASK			0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX		1

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
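
/*
 * Hedged sketch: begin_use cancels the pending idle work and powers the
 * block up; end_use re-arms it, so VCN powers down roughly one second
 * after the last job completes:
 *
 *	schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 */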

#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel)		\
	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);		\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,			\
			UVD_DPG_LMA_CTL__MASK_EN_MASK |				\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |		\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));	\
		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA);			\
	})

#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel)		\
	do {									\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value);		\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);		\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,			\
			UVD_DPG_LMA_CTL__READ_WRITE_MASK |			\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |		\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));	\
	} while (0)
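
/*
 * Hedged sketch: in DPG mode the UVD registers are reached indirectly
 * through the LMA window above; a masked read-modify-write could look
 * roughly like this (`en_mask` is a hypothetical bit mask):
 *
 *	tmp = RREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, 0xFFFFFFFF, 0);
 *	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, tmp | en_mask,
 *			      0xFFFFFFFF, 0);
 */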

#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg)				\
	({									\
		uint32_t internal_reg_offset, addr;				\
		bool video_range, aon_range;					\
										\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
		addr <<= 2;							\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
		aon_range   = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) && \
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600))))); \
		if (video_range)						\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) + \
				(VCN_VID_IP_ADDRESS_2_0));			\
		else if (aon_range)						\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) + \
				(VCN_AON_IP_ADDRESS_2_0));			\
		else								\
			internal_reg_offset = (0xFFFFF & addr);			\
										\
		internal_reg_offset >>= 2;					\
	})
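
/*
 * Worked example: the macro converts a dword register index to a byte
 * address, relocates it into the matching internal aperture, and converts
 * back. For a byte address of 0x1fa04 (inside the video range):
 *
 *	0x1fa04 - VCN_VID_SOC_ADDRESS_2_0 (0x1fa00)
 *		+ VCN_VID_IP_ADDRESS_2_0 (0x0) = 0x4, >> 2 = dword offset 1
 */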

#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en)			\
	({									\
		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,			\
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);		\
	})

#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect)	\
	do {									\
		if (!indirect) {						\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value);	\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,		\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |	\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |	\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
		} else {							\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;	\
		}								\
	} while (0)
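
/*
 * Hedged sketch: callers normally pair the offset translation with the
 * write so that `indirect` routes the offset/value pair into the DPG SRAM
 * buffer instead of the LMA window, roughly:
 *
 *	WREG32_SOC15_DPG_MODE_2_0(inst_idx,
 *		SOC15_DPG_MODE_OFFSET_2_0(UVD, inst_idx, mmUVD_VCPU_CNTL),
 *		value, 0, indirect);
 */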

enum engine_status_constants {
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
	UVD_STATUS__UVD_BUSY = 0x00000004,
	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
	UVD_STATUS__IDLE = 0x2,
	UVD_STATUS__BUSY = 0x5,
	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
	UVD_STATUS__RBC_BUSY = 0x1,
	UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
};
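
/*
 * Hedged sketch: these constants are compared against hardware status
 * registers; waiting for idle might look something like:
 *
 *	while ((RREG32_SOC15(VCN, 0, mmUVD_STATUS) & 0x7) != UVD_STATUS__IDLE)
 *		udelay(10);
 */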

enum internal_dpg_state {
	VCN_DPG_STATE__UNPAUSE = 0,
	VCN_DPG_STATE__PAUSE,
};

struct dpg_pause_state {
	enum internal_dpg_state fw_based;
	enum internal_dpg_state jpeg;
};
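
/*
 * Hedged sketch: a pause state is filled in and handed to the per-ASIC
 * pause_dpg_mode callback declared in struct amdgpu_vcn below:
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE,
 *		.jpeg = VCN_DPG_STATE__UNPAUSE,
 *	};
 *	adev->vcn.pause_dpg_mode(adev, inst_idx, &new_state);
 */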

struct amdgpu_vcn_reg {
	unsigned	data0;
	unsigned	data1;
	unsigned	cmd;
	unsigned	nop;
	unsigned	context_id;
	unsigned	ib_vmid;
	unsigned	ib_bar_low;
	unsigned	ib_bar_high;
	unsigned	ib_size;
	unsigned	gp_scratch8;
	unsigned	scratch9;
};

struct amdgpu_vcn_inst {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	void			*saved_bo;
	struct amdgpu_ring	ring_dec;
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
	struct amdgpu_irq_src	irq;
	struct amdgpu_vcn_reg	external;
	struct amdgpu_bo	*dpg_sram_bo;
	struct dpg_pause_state	pause_state;
	void			*dpg_sram_cpu_addr;
	uint64_t		dpg_sram_gpu_addr;
	uint32_t		*dpg_sram_curr_addr;
};

struct amdgpu_vcn {
	unsigned		fw_version;
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCN firmware */
	unsigned		num_enc_rings;
	enum amd_powergating_state cur_state;
	bool			indirect_sram;

	uint8_t			num_vcn_inst;
	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
	struct amdgpu_vcn_reg	internal;
	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
	uint32_t		num_vcn_enc_sched;
	uint32_t		num_vcn_dec_sched;

	unsigned		harvest_config;
	int (*pause_dpg_mode)(struct amdgpu_device *adev,
			      int inst_idx, struct dpg_pause_state *new_state);
};
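
/*
 * Hedged iteration sketch: multi-instance code walks num_vcn_inst and
 * skips instances masked off in harvest_config, roughly:
 *
 *	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 *		if (adev->vcn.harvest_config & (1 << i))
 *			continue;
 *		ring = &adev->vcn.inst[i].ring_dec;
 *		...
 *	}
 */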

int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
int amdgpu_vcn_resume(struct amdgpu_device *adev);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

#endif