/*
* Copyright 2014 Advanced Micro Devices , Inc .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*
*/
# ifndef __AMDGPU_GFX_H__
# define __AMDGPU_GFX_H__
/*
* GFX stuff
*/
# include "clearstate_defs.h"
# include "amdgpu_ring.h"
/* GFX current status */
# define AMDGPU_GFX_NORMAL_MODE 0x00000000L
# define AMDGPU_GFX_SAFE_MODE 0x00000001L
# define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
# define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
# define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
/* Per-ASIC RLC callbacks. enter/exit_safe_mode bracket CG/PG state
 * updates; the current state is tracked in amdgpu_rlc.in_safe_mode. */
struct amdgpu_rlc_funcs {
void ( * enter_safe_mode ) ( struct amdgpu_device * adev ) ;
void ( * exit_safe_mode ) ( struct amdgpu_device * adev ) ;
} ;
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo * save_restore_obj ;
uint64_t save_restore_gpu_addr ;
volatile uint32_t * sr_ptr ;
const u32 * reg_list ;
u32 reg_list_size ;
/* for clear state */
struct amdgpu_bo * clear_state_obj ;
uint64_t clear_state_gpu_addr ;
volatile uint32_t * cs_ptr ;
const struct cs_section_def * cs_data ;
u32 clear_state_size ;
/* for cp tables */
struct amdgpu_bo * cp_table_obj ;
uint64_t cp_table_gpu_addr ;
volatile uint32_t * cp_table_ptr ;
u32 cp_table_size ;
2017-06-07 13:31:32 -04:00
2018-08-02 16:12:39 +08:00
/* safe mode for updating CG/PG state */
bool in_safe_mode ;
const struct amdgpu_rlc_funcs * funcs ;
2017-06-07 13:31:32 -04:00
2018-08-02 16:12:39 +08:00
/* for firmware data */
u32 save_and_restore_offset ;
u32 clear_state_descriptor_offset ;
u32 avail_scratch_ram_locations ;
u32 reg_restore_list_size ;
u32 reg_list_format_start ;
u32 reg_list_format_separate_start ;
u32 starting_offsets_start ;
u32 reg_list_format_size_bytes ;
u32 reg_list_size_bytes ;
u32 reg_list_format_direct_reg_list_length ;
u32 save_restore_list_cntl_size_bytes ;
u32 save_restore_list_gpm_size_bytes ;
u32 save_restore_list_srm_size_bytes ;
u32 * register_list_format ;
u32 * register_restore ;
u8 * save_restore_list_cntl ;
u8 * save_restore_list_gpm ;
u8 * save_restore_list_srm ;
bool is_rlc_v2_1 ;
} ;
# define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
/* Compute micro-engine (MEC) resources and topology counts. */
struct amdgpu_mec {
struct amdgpu_bo * hpd_eop_obj ;
u64 hpd_eop_gpu_addr ;
struct amdgpu_bo * mec_fw_obj ;
u64 mec_fw_gpu_addr ;
u32 num_mec ;
u32 num_pipe_per_mec ;
u32 num_queue_per_pipe ;
/* CPU-side MQD backups; NOTE(review): the +1 slot is presumably for the KIQ ring -- confirm */
void * mqd_backup [ AMDGPU_MAX_COMPUTE_RINGS + 1 ] ;
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP ( queue_bitmap , AMDGPU_MAX_COMPUTE_QUEUES ) ;
} ;
/* Kernel Interface Queue (KIQ): a driver-owned ring with its own EOP
 * buffer and interrupt source; ring_lock serializes submissions. */
struct amdgpu_kiq {
u64 eop_gpu_addr ;
struct amdgpu_bo * eop_obj ;
spinlock_t ring_lock ;
struct amdgpu_ring ring ;
struct amdgpu_irq_src irq ;
} ;
/*
* GPU scratch registers structures , functions & helpers
*/
struct amdgpu_scratch {
unsigned num_reg ; /* number of scratch registers managed */
uint32_t reg_base ; /* offset of the first scratch register */
uint32_t free_mask ; /* allocation bitmask, one bit per register (see amdgpu_gfx_scratch_get/free) */
} ;
/*
* GFX configurations
*/
# define AMDGPU_GFX_MAX_SE 4
# define AMDGPU_GFX_MAX_SH_PER_SE 2
/* Cached render-backend (RB) configuration values, stored per SE/SH
 * in amdgpu_gfx_config.rb_config. */
struct amdgpu_rb_config {
uint32_t rb_backend_disable ;
uint32_t user_rb_backend_disable ;
uint32_t raster_config ;
uint32_t raster_config_1 ;
} ;
/* Decoded fields of the gb_addr_config value (see
 * amdgpu_gfx_config.gb_addr_config_fields). */
struct gb_addr_config {
uint16_t pipe_interleave_size ;
uint8_t num_pipes ;
uint8_t max_compress_frags ;
uint8_t num_banks ;
uint8_t num_se ;
uint8_t num_rb_per_se ;
} ;
/* Static GFX hardware configuration parameters for this ASIC. */
struct amdgpu_gfx_config {
unsigned max_shader_engines ;
unsigned max_tile_pipes ;
unsigned max_cu_per_sh ;
unsigned max_sh_per_se ;
unsigned max_backends_per_se ;
unsigned max_texture_channel_caches ;
unsigned max_gprs ;
unsigned max_gs_threads ;
unsigned max_hw_contexts ;
unsigned sc_prim_fifo_size_frontend ;
unsigned sc_prim_fifo_size_backend ;
unsigned sc_hiz_tile_fifo_size ;
unsigned sc_earlyz_tile_fifo_size ;
unsigned num_tile_pipes ;
unsigned backend_enable_mask ;
unsigned mem_max_burst_length_bytes ;
unsigned mem_row_size_in_kb ;
unsigned shader_engine_tile_size ;
unsigned num_gpus ;
unsigned multi_gpu_tile_size ;
unsigned mc_arb_ramcfg ;
unsigned gb_addr_config ;
unsigned num_rbs ;
unsigned gs_vgt_table_depth ;
unsigned gs_prim_buffer_depth ;
uint32_t tile_mode_array [ 32 ] ;
uint32_t macrotile_mode_array [ 16 ] ;
/* gb_addr_config broken out into individual fields */
struct gb_addr_config gb_addr_config_fields ;
struct amdgpu_rb_config rb_config [ AMDGPU_GFX_MAX_SE ] [ AMDGPU_GFX_MAX_SH_PER_SE ] ;
/* gfx configure feature */
uint32_t double_offchip_lds_buf ;
/* cached value of DB_DEBUG2 */
uint32_t db_debug2 ;
} ;
/* Compute unit (CU) topology and capability information. */
struct amdgpu_cu_info {
uint32_t simd_per_cu ;
uint32_t max_waves_per_simd ;
uint32_t wave_front_size ;
uint32_t max_scratch_slots_per_cu ;
uint32_t lds_size ;
/* total active CU number */
uint32_t number ;
uint32_t ao_cu_mask ;
uint32_t ao_cu_bitmap [ 4 ] [ 4 ] ;
uint32_t bitmap [ 4 ] [ 4 ] ;
} ;
/* Per-ASIC GFX helper callbacks (see the amdgpu_gfx_* macros below). */
struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t ( * get_gpu_clock_counter ) ( struct amdgpu_device * adev ) ;
/* select a shader engine / shader array / instance combination */
void ( * select_se_sh ) ( struct amdgpu_device * adev , u32 se_num ,
u32 sh_num , u32 instance ) ;
/* read state of one wave into dst; no_fields receives the count */
void ( * read_wave_data ) ( struct amdgpu_device * adev , uint32_t simd ,
uint32_t wave , uint32_t * dst , int * no_fields ) ;
/* read a range of VGPRs of one thread of a wave into dst */
void ( * read_wave_vgprs ) ( struct amdgpu_device * adev , uint32_t simd ,
uint32_t wave , uint32_t thread , uint32_t start ,
uint32_t size , uint32_t * dst ) ;
/* read a range of SGPRs of a wave into dst */
void ( * read_wave_sgprs ) ( struct amdgpu_device * adev , uint32_t simd ,
uint32_t wave , uint32_t start , uint32_t size ,
uint32_t * dst ) ;
/* select a micro-engine / pipe / queue combination */
void ( * select_me_pipe_q ) ( struct amdgpu_device * adev , u32 me , u32 pipe ,
u32 queue ) ;
} ;
/* One NGG buffer allocation (see amdgpu_ngg.buf below). */
struct amdgpu_ngg_buf {
struct amdgpu_bo * bo ;
uint64_t gpu_addr ;
uint32_t size ;
uint32_t bo_size ;
} ;
/* Indices into amdgpu_ngg.buf[]; NGG_BUF_MAX is the array size. */
enum {
NGG_PRIM = 0 ,
NGG_POS ,
NGG_CNTL ,
NGG_PARAM ,
NGG_BUF_MAX
} ;
/* NGG state: per-type buffers plus a reserved GDS region. */
struct amdgpu_ngg {
struct amdgpu_ngg_buf buf [ NGG_BUF_MAX ] ;
uint32_t gds_reserve_addr ;
uint32_t gds_reserve_size ;
bool init ; /* true once the NGG buffers have been set up */
} ;
/* Deferred work item for SQ interrupt handling; ih_data holds the
 * data passed along to the worker (NOTE(review): presumably the IH
 * ring payload -- confirm against the interrupt handler). */
struct sq_work {
struct work_struct work ;
unsigned ih_data ;
} ;
struct amdgpu_gfx {
struct mutex gpu_clock_mutex ;
struct amdgpu_gfx_config config ;
struct amdgpu_rlc rlc ;
struct amdgpu_mec mec ;
struct amdgpu_kiq kiq ;
struct amdgpu_scratch scratch ;
const struct firmware * me_fw ; /* ME firmware */
uint32_t me_fw_version ;
const struct firmware * pfp_fw ; /* PFP firmware */
uint32_t pfp_fw_version ;
const struct firmware * ce_fw ; /* CE firmware */
uint32_t ce_fw_version ;
const struct firmware * rlc_fw ; /* RLC firmware */
uint32_t rlc_fw_version ;
const struct firmware * mec_fw ; /* MEC firmware */
uint32_t mec_fw_version ;
const struct firmware * mec2_fw ; /* MEC2 firmware */
uint32_t mec2_fw_version ;
uint32_t me_feature_version ;
uint32_t ce_feature_version ;
uint32_t pfp_feature_version ;
uint32_t rlc_feature_version ;
uint32_t rlc_srlc_fw_version ;
uint32_t rlc_srlc_feature_version ;
uint32_t rlc_srlg_fw_version ;
uint32_t rlc_srlg_feature_version ;
uint32_t rlc_srls_fw_version ;
uint32_t rlc_srls_feature_version ;
uint32_t mec_feature_version ;
uint32_t mec2_feature_version ;
2018-08-17 18:26:41 +08:00
bool mec_fw_write_wait ;
bool me_fw_write_wait ;
2018-08-02 16:12:39 +08:00
struct amdgpu_ring gfx_ring [ AMDGPU_MAX_GFX_RINGS ] ;
unsigned num_gfx_rings ;
struct amdgpu_ring compute_ring [ AMDGPU_MAX_COMPUTE_RINGS ] ;
unsigned num_compute_rings ;
struct amdgpu_irq_src eop_irq ;
struct amdgpu_irq_src priv_reg_irq ;
struct amdgpu_irq_src priv_inst_irq ;
struct amdgpu_irq_src cp_ecc_error_irq ;
struct amdgpu_irq_src sq_irq ;
struct sq_work sq_work ;
/* gfx status */
uint32_t gfx_current_status ;
/* ce ram size*/
unsigned ce_ram_size ;
struct amdgpu_cu_info cu_info ;
const struct amdgpu_gfx_funcs * funcs ;
/* reset mask */
uint32_t grbm_soft_reset ;
uint32_t srbm_soft_reset ;
2018-09-29 15:27:02 +08:00
2018-08-02 16:12:39 +08:00
/* NGG */
struct amdgpu_ngg ngg ;
/* gfx off */
bool gfx_off_state ; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex ;
uint32_t gfx_off_req_count ; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
struct delayed_work gfx_off_delay_work ;
/* pipe reservation */
struct mutex pipe_reserve_mutex ;
DECLARE_BITMAP ( pipe_reserve_bitmap , AMDGPU_MAX_COMPUTE_QUEUES ) ;
} ;
# define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
# define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
# define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/**
 * amdgpu_gfx_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * Create a variable length bit mask.
 * Returns the bitmask.
 */
static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
{
	/* shift in 64 bits so bit_width == 32 does not overflow */
	u64 mask = (1ULL << bit_width) - 1ULL;

	return (u32)mask;
}
/* Allocate / release a GPU scratch register (see struct amdgpu_scratch). */
int amdgpu_gfx_scratch_get ( struct amdgpu_device * adev , uint32_t * reg ) ;
void amdgpu_gfx_scratch_free ( struct amdgpu_device * adev , uint32_t reg ) ;
/* Fill *mask with the CUs to disable, bounded by the max_se/max_sh
 * topology (NOTE(review): mask source not visible in this header). */
void amdgpu_gfx_parse_disable_cu ( unsigned * mask , unsigned max_se ,
unsigned max_sh ) ;
/* Set up the KIQ ring and its interrupt source. */
int amdgpu_gfx_kiq_init_ring ( struct amdgpu_device * adev ,
struct amdgpu_ring * ring ,
struct amdgpu_irq_src * irq ) ;
/* Tear down the KIQ ring set up by amdgpu_gfx_kiq_init_ring(). */
void amdgpu_gfx_kiq_free_ring ( struct amdgpu_ring * ring ,
struct amdgpu_irq_src * irq ) ;
/* KIQ EOP buffer teardown / setup (hpd_size is the EOP buffer size). */
void amdgpu_gfx_kiq_fini ( struct amdgpu_device * adev ) ;
int amdgpu_gfx_kiq_init ( struct amdgpu_device * adev ,
unsigned hpd_size ) ;
/* Allocate / free the per-ring compute MQD buffers. */
int amdgpu_gfx_compute_mqd_sw_init ( struct amdgpu_device * adev ,
unsigned mqd_size ) ;
void amdgpu_gfx_compute_mqd_sw_fini ( struct amdgpu_device * adev ) ;
/* Pick which MEC queues amdgpu owns (recorded in mec.queue_bitmap). */
void amdgpu_gfx_compute_queue_acquire ( struct amdgpu_device * adev ) ;
/* Convert between a (mec, pipe, queue) triple and a flat bitmap bit. */
int amdgpu_gfx_queue_to_bit ( struct amdgpu_device * adev , int mec ,
int pipe , int queue ) ;
void amdgpu_gfx_bit_to_queue ( struct amdgpu_device * adev , int bit ,
int * mec , int * pipe , int * queue ) ;
/* Check whether a (mec, pipe, queue) triple is owned by amdgpu. */
bool amdgpu_gfx_is_mec_queue_enabled ( struct amdgpu_device * adev , int mec ,
int pipe , int queue ) ;
/* Request GFXOFF enable/disable (refcounted via gfx_off_req_count). */
void amdgpu_gfx_off_ctrl ( struct amdgpu_device * adev , bool enable ) ;
# endif