2019-06-03 07:44:50 +02:00
/* SPDX-License-Identifier: GPL-2.0-only */
2013-07-19 12:59:32 -04:00
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
 */
# ifndef __ADRENO_GPU_H__
# define __ADRENO_GPU_H__
# include <linux/firmware.h>
2018-11-02 09:25:23 -06:00
# include <linux/iopoll.h>
2013-07-19 12:59:32 -04:00
# include "msm_gpu.h"
# include "adreno_common.xml.h"
# include "adreno_pm4.xml.h"
2014-09-08 10:57:28 -06:00
/*
 * Target code stores each register offset biased by +1 so that a zero
 * entry means "no offset assigned for this selector"; readers must
 * subtract 1 before use (see adreno_gpu_read()/adreno_gpu_write()).
 */
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1

/*
 * Sentinel for registers that deliberately do not exist on a target;
 * adreno_reg_check() treats it as "skip quietly" rather than a BUG().
 */
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
2014-09-08 10:57:28 -06:00
/**
 * adreno_regs: List of registers that are used in across all
 * 3D devices. Each device type has different offset value for the same
 * register, so an array of register offsets are declared for every device
 * and are indexed by the enumeration values defined in this enum
 */
enum adreno_regs {
	REG_ADRENO_CP_RB_BASE,
	REG_ADRENO_CP_RB_BASE_HI,
	REG_ADRENO_CP_RB_RPTR_ADDR,
	REG_ADRENO_CP_RB_RPTR_ADDR_HI,
	REG_ADRENO_CP_RB_RPTR,
	REG_ADRENO_CP_RB_WPTR,
	REG_ADRENO_CP_RB_CNTL,
	/* number of selectors; also the size of each per-target offset array */
	REG_ADRENO_REGISTER_MAX,
};
2018-02-01 12:15:16 -07:00
/*
 * Indices into adreno_info::fw[] / adreno_gpu::fw[].  Slots are shared
 * between generations: on a6xx, slot 0 holds the SQE firmware (instead
 * of PM4) and slot 1 holds the GMU firmware (instead of PFP).
 */
enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};
2016-11-28 12:28:33 -07:00
/*
 * Per-SoC hardware workaround flags.  These are tested with bitwise AND
 * (info->quirks & ADRENO_QUIRK_...), so each quirk must be a distinct
 * bit.  The previous values (1, 2, 3) were not: LMLOADKILL_DISABLE (3)
 * aliased TWO_PASS_USE_WFI | FAULT_DETECT_MASK, so setting it silently
 * enabled the other two quirks as well.
 */
enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = (1 << 0),
	ADRENO_QUIRK_FAULT_DETECT_MASK = (1 << 1),
	ADRENO_QUIRK_LMLOADKILL_DISABLE = (1 << 2),
};
2013-07-19 12:59:32 -04:00
/* Decoded chip id, e.g. a330v2 -> { 3, 3, 0, 1 } */
struct adreno_rev {
	uint8_t  core;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  patchid;
};

/* Build a struct adreno_rev compound literal from its four components. */
#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
/* Adreno-specific vtable: extends the common msm_gpu_funcs. */
struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	/* read a GPU timestamp/counter value into *value; 0 on success */
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
2014-09-05 13:30:27 -04:00
/*
 * Static, per-revision description of a GPU; looked up by adreno_info()
 * from the table of supported devices.
 */
struct adreno_info {
	struct adreno_rev rev;		/* revision this entry matches */
	uint32_t revn;			/* numeric name, e.g. 330 for a330 */
	const char *name;
	/* firmware file names, indexed by the ADRENO_FW_* enum */
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;			/* GMEM (on-chip memory) size, bytes */
	enum adreno_quirks quirks;
	/* per-generation constructor (aNxx_gpu_init) */
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;		/* zap shader firmware name, if any */
	/* inactivity delay before suspend — presumably msec; confirm w/ caller */
	u32 inactive_period;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);
2013-07-19 12:59:32 -04:00
/* Common state for all Adreno generations; embeds the generic msm_gpu. */
struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;		/* detected hardware revision */
	const struct adreno_info *info;	/* static info matching 'rev' */
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path?  Prior to addition of gpu
	 * firmware to linux-firmware, the fw files were placed in toplevel
	 * firmware directory, following qcom's android kernel.  But
	 * linux-firmware preferred they be placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from the new
	 * path, using request_firmware_direct() to avoid any potential
	 * timeout waiting for usermode helper, then fall back to the old
	 * path (with direct load).  And finally fall back to
	 * request_firmware() with the new path to allow the usermode
	 * helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,	/* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,	/* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware, indexed by the ADRENO_FW_* enum: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
2019-08-23 05:16:36 -07:00
struct adreno_ocmem {
struct ocmem * ocmem ;
unsigned long base ;
void * hdl ;
} ;
2013-07-19 12:59:32 -04:00
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;	/* revision requested by the platform */
};
2014-01-11 16:11:59 -05:00
# define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
# define spin_until(X) ({ \
int __ret = - ETIMEDOUT ; \
unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT ; \
do { \
if ( X ) { \
__ret = 0 ; \
break ; \
} \
} while ( time_before ( jiffies , __t ) ) ; \
__ret ; \
} )
2018-11-21 20:52:32 -05:00
static inline bool adreno_is_a2xx ( struct adreno_gpu * gpu )
{
return ( gpu - > revn < 300 ) ;
}
static inline bool adreno_is_a20x ( struct adreno_gpu * gpu )
{
return ( gpu - > revn < 210 ) ;
}
static inline bool adreno_is_a225 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 225 ;
}
2013-07-19 12:59:32 -04:00
static inline bool adreno_is_a3xx ( struct adreno_gpu * gpu )
{
return ( gpu - > revn > = 300 ) & & ( gpu - > revn < 400 ) ;
}
static inline bool adreno_is_a305 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 305 ;
}
2015-05-06 13:14:30 -04:00
static inline bool adreno_is_a306 ( struct adreno_gpu * gpu )
{
/* yes, 307, because a305c is 306 */
return gpu - > revn = = 307 ;
}
2013-07-19 12:59:32 -04:00
static inline bool adreno_is_a320 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 320 ;
}
static inline bool adreno_is_a330 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 330 ;
}
2013-12-05 17:39:53 -05:00
static inline bool adreno_is_a330v2 ( struct adreno_gpu * gpu )
{
return adreno_is_a330 ( gpu ) & & ( gpu - > rev . patchid > 0 ) ;
}
2014-09-08 13:40:16 -06:00
static inline bool adreno_is_a4xx ( struct adreno_gpu * gpu )
{
return ( gpu - > revn > = 400 ) & & ( gpu - > revn < 500 ) ;
}
static inline int adreno_is_a420 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 420 ;
}
2016-02-18 16:50:00 -08:00
static inline int adreno_is_a430 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 430 ;
}
2019-10-31 11:44:02 +01:00
static inline int adreno_is_a510 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 510 ;
}
2016-11-28 12:28:33 -07:00
static inline int adreno_is_a530 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 530 ;
}
2019-06-11 11:54:00 -07:00
static inline int adreno_is_a540 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 540 ;
}
2019-12-03 15:16:14 +00:00
static inline int adreno_is_a618 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 618 ;
}
static inline int adreno_is_a630 ( struct adreno_gpu * gpu )
{
return gpu - > revn = = 630 ;
}
2013-07-19 12:59:32 -04:00
int adreno_get_param ( struct msm_gpu * gpu , uint32_t param , uint64_t * value ) ;
2017-10-16 10:13:15 -04:00
const struct firmware * adreno_request_fw ( struct adreno_gpu * adreno_gpu ,
const char * fwname ) ;
2018-02-01 12:15:17 -07:00
struct drm_gem_object * adreno_fw_create_bo ( struct msm_gpu * gpu ,
const struct firmware * fw , u64 * iova ) ;
2013-07-19 12:59:32 -04:00
int adreno_hw_init ( struct msm_gpu * gpu ) ;
2013-08-24 14:20:38 -04:00
void adreno_recover ( struct msm_gpu * gpu ) ;
2016-05-03 09:46:49 -04:00
void adreno_submit ( struct msm_gpu * gpu , struct msm_gem_submit * submit ,
2013-07-19 12:59:32 -04:00
struct msm_file_private * ctx ) ;
2017-10-20 11:06:57 -06:00
void adreno_flush ( struct msm_gpu * gpu , struct msm_ringbuffer * ring ) ;
bool adreno_idle ( struct msm_gpu * gpu , struct msm_ringbuffer * ring ) ;
2018-07-24 10:33:27 -06:00
# if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
2018-07-24 10:33:25 -06:00
void adreno_show ( struct msm_gpu * gpu , struct msm_gpu_state * state ,
2018-07-24 10:33:27 -06:00
struct drm_printer * p ) ;
2013-07-19 12:59:32 -04:00
# endif
2015-04-19 10:14:09 -04:00
void adreno_dump_info ( struct msm_gpu * gpu ) ;
2013-12-22 10:29:43 -05:00
void adreno_dump ( struct msm_gpu * gpu ) ;
2017-10-20 11:06:57 -06:00
void adreno_wait_ring ( struct msm_ringbuffer * ring , uint32_t ndwords ) ;
struct msm_ringbuffer * adreno_active_ring ( struct msm_gpu * gpu ) ;
2013-07-19 12:59:32 -04:00
2019-08-23 05:16:36 -07:00
int adreno_gpu_ocmem_init ( struct device * dev , struct adreno_gpu * adreno_gpu ,
struct adreno_ocmem * ocmem ) ;
void adreno_gpu_ocmem_cleanup ( struct adreno_ocmem * ocmem ) ;
2013-07-19 12:59:32 -04:00
int adreno_gpu_init ( struct drm_device * drm , struct platform_device * pdev ,
2017-10-20 11:06:57 -06:00
struct adreno_gpu * gpu , const struct adreno_gpu_funcs * funcs ,
int nr_rings ) ;
2013-07-19 12:59:32 -04:00
void adreno_gpu_cleanup ( struct adreno_gpu * gpu ) ;
2018-08-06 11:33:22 -06:00
int adreno_load_fw ( struct adreno_gpu * adreno_gpu ) ;
2013-07-19 12:59:32 -04:00
2018-07-24 10:33:30 -06:00
void adreno_gpu_state_destroy ( struct msm_gpu_state * state ) ;
int adreno_gpu_state_get ( struct msm_gpu * gpu , struct msm_gpu_state * state ) ;
2018-07-24 10:33:27 -06:00
int adreno_gpu_state_put ( struct msm_gpu_state * state ) ;
2018-07-24 10:33:24 -06:00
2019-04-19 13:46:14 -06:00
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
*/
int adreno_zap_shader_load ( struct msm_gpu * gpu , u32 pasid ) ;
2013-07-19 12:59:32 -04:00
/* ringbuffer helpers (the parts that are adreno specific) */
/* Emit a type-0 packet header: write 'cnt' dwords starting at register
 * 'regindx' (15-bit).  Reserves ring space for header plus payload. */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF));
}
/* no-op (type-2) packet: a single padding dword */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}
/* Emit a type-3 packet header: 8-bit opcode plus 'cnt' payload dwords. */
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8));
}
2016-11-28 12:28:30 -07:00
/*
 * Parity bit for a PM4 packet header field: XOR-fold all eight nibbles
 * of 'val' into one nibble and look it up in the 0x9669 table.  Returns
 * 1 when 'val' has an even number of set bits, 0 when odd.
 */
static inline u32 PM4_PARITY(u32 val)
{
	u32 fold = val;

	fold ^= fold >> 16;
	fold ^= fold >> 8;
	fold ^= fold >> 4;
	return (0x9669 >> (fold & 0xF)) & 1;
}
/* Maximum number of values that can be executed for one opcode */
# define TYPE4_MAX_PAYLOAD 127
# define PKT4(_reg, _cnt) \
( CP_TYPE4_PKT | ( ( _cnt ) < < 0 ) | ( PM4_PARITY ( ( _cnt ) ) < < 7 ) | \
( ( ( _reg ) & 0x3FFFF ) < < 8 ) | ( PM4_PARITY ( ( _reg ) ) < < 27 ) )
/* Emit a type-4 packet header (see PKT4) followed by space for 'cnt'
 * dwords to be written starting at register 'regindx'. */
static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}
/* Emit a type-7 (opcode) packet header: 15-bit dword count with parity
 * at bit 15, 7-bit opcode with parity at bit 23. */
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
2014-09-08 10:57:28 -06:00
/*
 * adreno_reg_check() - Checks the validity of a register enum
 * @gpu:		Pointer to struct adreno_gpu
 * @offset_name:	The register enum that is checked
 *
 * Returns true when the selector maps to a real register on this GPU,
 * false when the target marked it REG_SKIP.  An out-of-range selector,
 * or one the target never defined an offset for, is a driver bug and
 * triggers BUG_ON().
 */
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
		enum adreno_regs offset_name)
{
	BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);

	/*
	 * REG_SKIP is a special value that tell us that the register in
	 * question isn't implemented on target but don't trigger a BUG(). This
	 * is used to cleanly implement adreno_gpu_write64() and
	 * adreno_gpu_read64() in a generic fashion
	 */
	if (gpu->reg_offsets[offset_name] == REG_SKIP)
		return false;

	return true;
}
static inline u32 adreno_gpu_read ( struct adreno_gpu * gpu ,
enum adreno_regs offset_name )
{
u32 reg = gpu - > reg_offsets [ offset_name ] ;
u32 val = 0 ;
if ( adreno_reg_check ( gpu , offset_name ) )
val = gpu_read ( & gpu - > base , reg - 1 ) ;
return val ;
}
/* Write 'data' to the per-target register selected by 'offset_name';
 * silently does nothing if the register is skipped on this target. */
static inline void adreno_gpu_write(struct adreno_gpu *gpu,
		enum adreno_regs offset_name, u32 data)
{
	u32 offset = gpu->reg_offsets[offset_name];

	if (!adreno_reg_check(gpu, offset_name))
		return;

	/* undo the +1 bias applied by REG_ADRENO_DEFINE */
	gpu_write(&gpu->base, offset - 1, data);
}
2013-07-19 12:59:32 -04:00
2018-11-21 20:52:32 -05:00
struct msm_gpu * a2xx_gpu_init ( struct drm_device * dev ) ;
2016-10-22 17:17:44 +08:00
struct msm_gpu * a3xx_gpu_init ( struct drm_device * dev ) ;
struct msm_gpu * a4xx_gpu_init ( struct drm_device * dev ) ;
2016-11-28 12:28:33 -07:00
struct msm_gpu * a5xx_gpu_init ( struct drm_device * dev ) ;
2018-08-06 11:33:24 -06:00
struct msm_gpu * a6xx_gpu_init ( struct drm_device * dev ) ;
2016-10-22 17:17:44 +08:00
2016-11-28 12:28:29 -07:00
/*
 * Split a 64-bit value across a lo/hi register pair.  The low word is
 * written before the high word; NOTE(review): assuming hardware may
 * care about this ordering — confirm before reordering.
 */
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
		enum adreno_regs lo, enum adreno_regs hi, u64 data)
{
	adreno_gpu_write(gpu, lo, lower_32_bits(data));
	adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
2017-10-20 11:07:01 -06:00
/* Current write position in the ring, as a dword index from the start
 * of the buffer, wrapped at the ringbuffer size (SZ is in bytes). */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}
2016-11-28 12:28:33 -07:00
/*
* Given a register and a count , return a value to program into
* REG_CP_PROTECT_REG ( n ) - this will block both reads and writes for _len
* registers starting at _reg .
*
* The register base needs to be a multiple of the length . If it is not , the
* hardware will quietly mask off the bits for you and shift the size . For
* example , if you intend the protection to start at 0x07 for a length of 4
* ( 0x07 - 0x0A ) the hardware will actually protect ( 0x04 - 0x07 ) which might
* expose registers you intended to protect !
*/
# define ADRENO_PROTECT_RW(_reg, _len) \
( ( 1 < < 30 ) | ( 1 < < 29 ) | \
( ( ilog2 ( ( _len ) ) & 0x1F ) < < 24 ) | ( ( ( _reg ) < < 2 ) & 0xFFFFF ) )
/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register.
 *
 * Fix: the '|' joining (1 << 29) to the length field was missing, so any
 * expansion of this macro was a syntax error ("(1 << 29) (...)").
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
2018-11-02 09:25:23 -06:00
# define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
readl_poll_timeout ( ( gpu ) - > mmio + ( ( addr ) < < 2 ) , val , cond , \
interval , timeout )
2013-07-19 12:59:32 -04:00
# endif /* __ADRENO_GPU_H__ */