/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
2017-11-21 12:40:55 -07:00
# include <linux/pm_opp.h>
2013-07-19 12:59:32 -04:00
# include "adreno_gpu.h"
# include "msm_gem.h"
2013-11-16 12:56:06 -05:00
# include "msm_mmu.h"
2013-07-19 12:59:32 -04:00
/*
 * Service the MSM_GET_PARAM ioctl: report a GPU property to userspace.
 * Fills *value and returns 0, or returns -EINVAL for unknown params.
 */
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		/* pack the revision as core.major.minor.patchid, one byte each */
		*value = adreno_gpu->rev.patchid |
			(adreno_gpu->rev.minor << 8) |
			(adreno_gpu->rev.major << 16) |
			(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			/* reading the timestamp requires the hw to be awake */
			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
2017-10-16 10:13:15 -04:00
const struct firmware *
adreno_request_fw ( struct adreno_gpu * adreno_gpu , const char * fwname )
2017-10-16 09:22:38 -04:00
{
struct drm_device * drm = adreno_gpu - > base . dev ;
2017-10-16 10:13:15 -04:00
const struct firmware * fw = NULL ;
2017-10-16 10:46:23 -04:00
char newname [ strlen ( " qcom/ " ) + strlen ( fwname ) + 1 ] ;
2017-10-16 09:22:38 -04:00
int ret ;
2017-10-16 10:46:23 -04:00
sprintf ( newname , " qcom/%s " , fwname ) ;
/*
* Try first to load from qcom / $ fwfile using a direct load ( to avoid
* a potential timeout waiting for usermode helper )
*/
if ( ( adreno_gpu - > fwloc = = FW_LOCATION_UNKNOWN ) | |
( adreno_gpu - > fwloc = = FW_LOCATION_NEW ) ) {
ret = request_firmware_direct ( & fw , newname , drm - > dev ) ;
if ( ! ret ) {
dev_info ( drm - > dev , " loaded %s from new location \n " ,
newname ) ;
adreno_gpu - > fwloc = FW_LOCATION_NEW ;
return fw ;
} else if ( adreno_gpu - > fwloc ! = FW_LOCATION_UNKNOWN ) {
dev_err ( drm - > dev , " failed to load %s: %d \n " ,
newname , ret ) ;
return ERR_PTR ( ret ) ;
}
}
/*
* Then try the legacy location without qcom / prefix
*/
if ( ( adreno_gpu - > fwloc = = FW_LOCATION_UNKNOWN ) | |
( adreno_gpu - > fwloc = = FW_LOCATION_LEGACY ) ) {
ret = request_firmware_direct ( & fw , fwname , drm - > dev ) ;
if ( ! ret ) {
dev_info ( drm - > dev , " loaded %s from legacy location \n " ,
newname ) ;
adreno_gpu - > fwloc = FW_LOCATION_LEGACY ;
return fw ;
} else if ( adreno_gpu - > fwloc ! = FW_LOCATION_UNKNOWN ) {
dev_err ( drm - > dev , " failed to load %s: %d \n " ,
fwname , ret ) ;
return ERR_PTR ( ret ) ;
}
}
/*
* Finally fall back to request_firmware ( ) for cases where the
* usermode helper is needed ( I think mainly android )
*/
if ( ( adreno_gpu - > fwloc = = FW_LOCATION_UNKNOWN ) | |
( adreno_gpu - > fwloc = = FW_LOCATION_HELPER ) ) {
ret = request_firmware ( & fw , newname , drm - > dev ) ;
if ( ! ret ) {
dev_info ( drm - > dev , " loaded %s with helper \n " ,
newname ) ;
adreno_gpu - > fwloc = FW_LOCATION_HELPER ;
return fw ;
} else if ( adreno_gpu - > fwloc ! = FW_LOCATION_UNKNOWN ) {
dev_err ( drm - > dev , " failed to load %s: %d \n " ,
newname , ret ) ;
return ERR_PTR ( ret ) ;
}
2017-10-16 10:13:15 -04:00
}
2017-10-16 10:46:23 -04:00
dev_err ( drm - > dev , " failed to load %s \n " , fwname ) ;
return ERR_PTR ( - ENOENT ) ;
2017-10-16 10:13:15 -04:00
}
static int adreno_load_fw ( struct adreno_gpu * adreno_gpu )
{
2018-02-01 12:15:16 -07:00
int i ;
2017-10-16 10:13:15 -04:00
2018-02-01 12:15:16 -07:00
for ( i = 0 ; i < ARRAY_SIZE ( adreno_gpu - > info - > fw ) ; i + + ) {
const struct firmware * fw ;
2017-10-16 09:22:38 -04:00
2018-02-01 12:15:16 -07:00
if ( ! adreno_gpu - > info - > fw [ i ] )
continue ;
2017-10-16 09:22:38 -04:00
2018-02-01 12:15:16 -07:00
/* Skip if the firmware has already been loaded */
if ( adreno_gpu - > fw [ i ] )
continue ;
fw = adreno_request_fw ( adreno_gpu , adreno_gpu - > info - > fw [ i ] ) ;
if ( IS_ERR ( fw ) )
return PTR_ERR ( fw ) ;
adreno_gpu - > fw [ i ] = fw ;
2017-10-16 09:22:38 -04:00
}
return 0 ;
}
2018-02-01 12:15:17 -07:00
/*
 * Copy a firmware image (minus its 4-byte version header) into a new
 * GPU-readable buffer object and return it, with its GPU address in
 * *iova.  Returns an ERR_PTR on failure.
 */
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	/*
	 * Guard against a truncated firmware file: fw->size is unsigned,
	 * so fw->size - 4 would otherwise underflow to a huge allocation
	 * size (and a zero-length image is useless anyway).
	 */
	if (fw->size <= 4)
		return ERR_PTR(-EINVAL);

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	/* skip the 4-byte version header at the front of the image */
	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}
2013-07-19 12:59:32 -04:00
int adreno_hw_init ( struct msm_gpu * gpu )
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
2017-10-20 11:06:57 -06:00
int ret , i ;
2013-07-19 12:59:32 -04:00
DBG ( " %s " , gpu - > name ) ;
2017-10-16 09:22:38 -04:00
ret = adreno_load_fw ( adreno_gpu ) ;
if ( ret )
return ret ;
2017-10-20 11:06:57 -06:00
for ( i = 0 ; i < gpu - > nr_rings ; i + + ) {
struct msm_ringbuffer * ring = gpu - > rb [ i ] ;
2014-07-09 22:08:15 -04:00
2017-10-20 11:06:57 -06:00
if ( ! ring )
continue ;
2017-02-12 11:42:14 -05:00
2017-10-20 11:06:57 -06:00
ret = msm_gem_get_iova ( ring - > bo , gpu - > aspace , & ring - > iova ) ;
if ( ret ) {
ring - > iova = 0 ;
dev_err ( gpu - > dev - > dev ,
" could not map ringbuffer %d: %d \n " , i , ret ) ;
return ret ;
}
ring - > cur = ring - > start ;
2017-10-20 11:06:59 -06:00
ring - > next = ring - > start ;
2017-10-20 11:06:57 -06:00
/* reset completed fence seqno: */
ring - > memptrs - > fence = ring - > seqno ;
ring - > memptrs - > rptr = 0 ;
}
2017-02-12 11:42:14 -05:00
2017-10-20 11:07:00 -06:00
/*
* Setup REG_CP_RB_CNTL . The same value is used across targets ( with
* the excpetion of A430 that disables the RPTR shadow ) - the cacluation
* for the ringbuffer size and block size is moved to msm_gpu . h for the
* pre - processor to deal with and the A430 variant is ORed in here
*/
2014-09-08 10:57:28 -06:00
adreno_gpu_write ( adreno_gpu , REG_ADRENO_CP_RB_CNTL ,
2017-10-20 11:07:00 -06:00
MSM_GPU_RB_CNTL_DEFAULT |
2017-10-20 11:06:57 -06:00
( adreno_is_a430 ( adreno_gpu ) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0 ) ) ;
2013-07-19 12:59:32 -04:00
2017-10-20 11:06:57 -06:00
/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
2016-11-28 12:28:29 -07:00
adreno_gpu_write64 ( adreno_gpu , REG_ADRENO_CP_RB_BASE ,
2017-10-20 11:06:57 -06:00
REG_ADRENO_CP_RB_BASE_HI , gpu - > rb [ 0 ] - > iova ) ;
2013-07-19 12:59:32 -04:00
2016-11-28 12:28:29 -07:00
if ( ! adreno_is_a430 ( adreno_gpu ) ) {
adreno_gpu_write64 ( adreno_gpu , REG_ADRENO_CP_RB_RPTR_ADDR ,
2017-10-20 11:06:57 -06:00
REG_ADRENO_CP_RB_RPTR_ADDR_HI ,
rbmemptr ( gpu - > rb [ 0 ] , rptr ) ) ;
2016-11-28 12:28:29 -07:00
}
2013-07-19 12:59:32 -04:00
return 0 ;
}
2016-02-18 16:50:02 -08:00
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
2017-10-20 11:06:57 -06:00
static uint32_t get_rptr ( struct adreno_gpu * adreno_gpu ,
struct msm_ringbuffer * ring )
2016-02-18 16:50:02 -08:00
{
if ( adreno_is_a430 ( adreno_gpu ) )
2017-10-20 11:06:57 -06:00
return ring - > memptrs - > rptr = adreno_gpu_read (
2016-02-18 16:50:02 -08:00
adreno_gpu , REG_ADRENO_CP_RB_RPTR ) ;
else
2017-10-20 11:06:57 -06:00
return ring - > memptrs - > rptr ;
}
struct msm_ringbuffer * adreno_active_ring ( struct msm_gpu * gpu )
{
return gpu - > rb [ 0 ] ;
2013-07-19 12:59:32 -04:00
}
2013-08-24 14:20:38 -04:00
void adreno_recover ( struct msm_gpu * gpu )
{
struct drm_device * dev = gpu - > dev ;
int ret ;
2017-02-10 15:36:33 -05:00
// XXX pm-runtime?? we *need* the device to be off after this
// so maybe continuing to call ->pm_suspend/resume() is better?
2013-08-24 14:20:38 -04:00
gpu - > funcs - > pm_suspend ( gpu ) ;
gpu - > funcs - > pm_resume ( gpu ) ;
2016-11-28 12:28:32 -07:00
2017-02-10 15:36:33 -05:00
ret = msm_gpu_hw_init ( gpu ) ;
2013-08-24 14:20:38 -04:00
if ( ret ) {
dev_err ( dev - > dev , " gpu hw init failed: %d \n " , ret ) ;
/* hmm, oh well? */
}
}
2016-05-03 09:46:49 -04:00
void adreno_submit ( struct msm_gpu * gpu , struct msm_gem_submit * submit ,
2013-07-19 12:59:32 -04:00
struct msm_file_private * ctx )
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
struct msm_drm_private * priv = gpu - > dev - > dev_private ;
2017-10-20 11:06:57 -06:00
struct msm_ringbuffer * ring = submit - > ring ;
2016-06-01 14:17:40 -04:00
unsigned i ;
2013-07-19 12:59:32 -04:00
for ( i = 0 ; i < submit - > nr_cmds ; i + + ) {
switch ( submit - > cmd [ i ] . type ) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF :
/* ignore IB-targets */
break ;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF :
/* ignore if there has not been a ctx switch: */
if ( priv - > lastctx = = ctx )
break ;
case MSM_SUBMIT_CMD_BUF :
2016-02-18 16:50:00 -08:00
OUT_PKT3 ( ring , adreno_is_a430 ( adreno_gpu ) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD , 2 ) ;
2017-10-20 11:07:01 -06:00
OUT_RING ( ring , lower_32_bits ( submit - > cmd [ i ] . iova ) ) ;
2013-07-19 12:59:32 -04:00
OUT_RING ( ring , submit - > cmd [ i ] . size ) ;
2016-06-01 14:17:40 -04:00
OUT_PKT2 ( ring ) ;
2013-07-19 12:59:32 -04:00
break ;
}
}
OUT_PKT0 ( ring , REG_AXXX_CP_SCRATCH_REG2 , 1 ) ;
2017-10-20 11:06:57 -06:00
OUT_RING ( ring , submit - > seqno ) ;
2013-07-19 12:59:32 -04:00
2014-09-08 13:40:16 -06:00
if ( adreno_is_a3xx ( adreno_gpu ) | | adreno_is_a4xx ( adreno_gpu ) ) {
2013-07-19 12:59:32 -04:00
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed :
*/
OUT_PKT3 ( ring , CP_EVENT_WRITE , 1 ) ;
OUT_RING ( ring , HLSQ_FLUSH ) ;
OUT_PKT3 ( ring , CP_WAIT_FOR_IDLE , 1 ) ;
OUT_RING ( ring , 0x00000000 ) ;
}
2018-02-13 22:46:58 -08:00
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
2013-07-19 12:59:32 -04:00
OUT_PKT3 ( ring , CP_EVENT_WRITE , 3 ) ;
2018-02-13 22:46:58 -08:00
OUT_RING ( ring , CACHE_FLUSH_TS | BIT ( 31 ) ) ;
2017-10-20 11:06:57 -06:00
OUT_RING ( ring , rbmemptr ( ring , fence ) ) ;
OUT_RING ( ring , submit - > seqno ) ;
2013-07-19 12:59:32 -04:00
#if 0
if ( adreno_is_a3xx ( adreno_gpu ) ) {
/* Dummy set-constant to trigger context rollover */
OUT_PKT3 ( ring , CP_SET_CONSTANT , 2 ) ;
OUT_RING ( ring , CP_REG ( REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG ) ) ;
OUT_RING ( ring , 0x00000000 ) ;
}
# endif
2017-10-20 11:06:57 -06:00
gpu - > funcs - > flush ( gpu , ring ) ;
2013-07-19 12:59:32 -04:00
}
2017-10-20 11:06:57 -06:00
void adreno_flush ( struct msm_gpu * gpu , struct msm_ringbuffer * ring )
2013-07-19 12:59:32 -04:00
{
2014-09-08 10:57:28 -06:00
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
2016-12-20 08:54:29 -07:00
uint32_t wptr ;
2017-10-20 11:06:59 -06:00
/* Copy the shadow to the actual register */
ring - > cur = ring - > next ;
2016-12-20 08:54:29 -07:00
/*
* Mask wptr value that we calculate to fit in the HW range . This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb - > next hasn ' t wrapped to zero yet
*/
2017-10-20 11:07:01 -06:00
wptr = get_wptr ( ring ) ;
2013-07-19 12:59:32 -04:00
/* ensure writes to ringbuffer have hit system memory: */
mb ( ) ;
2014-09-08 10:57:28 -06:00
adreno_gpu_write ( adreno_gpu , REG_ADRENO_CP_RB_WPTR , wptr ) ;
2013-07-19 12:59:32 -04:00
}
2017-10-20 11:06:57 -06:00
bool adreno_idle ( struct msm_gpu * gpu , struct msm_ringbuffer * ring )
2013-07-19 12:59:32 -04:00
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
2017-10-20 11:06:57 -06:00
uint32_t wptr = get_wptr ( ring ) ;
2013-07-19 12:59:32 -04:00
2014-01-11 16:11:59 -05:00
/* wait for CP to drain ringbuffer: */
2017-10-20 11:06:57 -06:00
if ( ! spin_until ( get_rptr ( adreno_gpu , ring ) = = wptr ) )
2016-11-28 12:28:27 -07:00
return true ;
2013-07-19 12:59:32 -04:00
/* TODO maybe we need to reset GPU here to recover from hang? */
2017-10-20 11:07:01 -06:00
DRM_ERROR ( " %s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X \n " ,
gpu - > name , ring - > id , get_rptr ( adreno_gpu , ring ) , wptr ) ;
2016-11-28 12:28:27 -07:00
return false ;
2013-07-19 12:59:32 -04:00
}
# ifdef CONFIG_DEBUG_FS
void adreno_show ( struct msm_gpu * gpu , struct seq_file * m )
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
2014-09-05 15:05:38 -04:00
int i ;
2013-07-19 12:59:32 -04:00
seq_printf ( m , " revision: %d (%d.%d.%d.%d) \n " ,
adreno_gpu - > info - > revn , adreno_gpu - > rev . core ,
adreno_gpu - > rev . major , adreno_gpu - > rev . minor ,
adreno_gpu - > rev . patchid ) ;
2017-10-20 11:06:57 -06:00
for ( i = 0 ; i < gpu - > nr_rings ; i + + ) {
struct msm_ringbuffer * ring = gpu - > rb [ i ] ;
seq_printf ( m , " rb %d: fence: %d/%d \n " , i ,
ring - > memptrs - > fence , ring - > seqno ) ;
seq_printf ( m , " rptr: %d \n " ,
get_rptr ( adreno_gpu , ring ) ) ;
seq_printf ( m , " rb wptr: %d \n " , get_wptr ( ring ) ) ;
}
2014-09-05 15:05:38 -04:00
/* dump these out in a form that can be parsed by demsm: */
seq_printf ( m , " IO:region %s 00000000 00020000 \n " , gpu - > name ) ;
for ( i = 0 ; adreno_gpu - > registers [ i ] ! = ~ 0 ; i + = 2 ) {
uint32_t start = adreno_gpu - > registers [ i ] ;
uint32_t end = adreno_gpu - > registers [ i + 1 ] ;
uint32_t addr ;
for ( addr = start ; addr < = end ; addr + + ) {
uint32_t val = gpu_read ( gpu , addr ) ;
seq_printf ( m , " IO:R %08x %08x \n " , addr < < 2 , val ) ;
}
}
2013-07-19 12:59:32 -04:00
}
# endif
2015-04-19 10:14:09 -04:00
/* Dump common gpu status and scratch registers on any hang, to make
* the hangcheck logs more useful . The scratch registers seem always
* safe to read when GPU has hung ( unlike some other regs , depending
* on how the GPU hung ) , and they are useful to match up to cmdstream
* dumps when debugging hangs :
*/
void adreno_dump_info ( struct msm_gpu * gpu )
2013-12-22 10:29:43 -05:00
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
2017-10-20 11:06:57 -06:00
int i ;
2013-12-22 10:29:43 -05:00
printk ( " revision: %d (%d.%d.%d.%d) \n " ,
adreno_gpu - > info - > revn , adreno_gpu - > rev . core ,
adreno_gpu - > rev . major , adreno_gpu - > rev . minor ,
adreno_gpu - > rev . patchid ) ;
2017-10-20 11:06:57 -06:00
for ( i = 0 ; i < gpu - > nr_rings ; i + + ) {
struct msm_ringbuffer * ring = gpu - > rb [ i ] ;
printk ( " rb %d: fence: %d/%d \n " , i ,
ring - > memptrs - > fence ,
ring - > seqno ) ;
printk ( " rptr: %d \n " , get_rptr ( adreno_gpu , ring ) ) ;
printk ( " rb wptr: %d \n " , get_wptr ( ring ) ) ;
}
2015-04-19 10:14:09 -04:00
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump ( struct msm_gpu * gpu )
{
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( gpu ) ;
int i ;
2014-09-05 15:05:38 -04:00
/* dump these out in a form that can be parsed by demsm: */
printk ( " IO:region %s 00000000 00020000 \n " , gpu - > name ) ;
for ( i = 0 ; adreno_gpu - > registers [ i ] ! = ~ 0 ; i + = 2 ) {
uint32_t start = adreno_gpu - > registers [ i ] ;
uint32_t end = adreno_gpu - > registers [ i + 1 ] ;
uint32_t addr ;
for ( addr = start ; addr < = end ; addr + + ) {
uint32_t val = gpu_read ( gpu , addr ) ;
printk ( " IO:R %08x %08x \n " , addr < < 2 , val ) ;
}
}
2013-12-22 10:29:43 -05:00
}
2017-10-20 11:06:57 -06:00
static uint32_t ring_freewords ( struct msm_ringbuffer * ring )
2013-07-19 12:59:32 -04:00
{
2017-10-20 11:06:57 -06:00
struct adreno_gpu * adreno_gpu = to_adreno_gpu ( ring - > gpu ) ;
uint32_t size = MSM_GPU_RINGBUFFER_SZ > > 2 ;
2017-10-20 11:06:59 -06:00
/* Use ring->next to calculate free size */
uint32_t wptr = ring - > next - ring - > start ;
2017-10-20 11:06:57 -06:00
uint32_t rptr = get_rptr ( adreno_gpu , ring ) ;
2014-01-11 16:11:59 -05:00
return ( rptr + ( size - 1 ) - wptr ) % size ;
}
2017-10-20 11:06:57 -06:00
/* Spin until at least ndwords of space are free in the ring. */
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}
2017-11-21 12:40:55 -07:00
/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels ( struct device * dev )
{
struct device_node * child , * node ;
int ret ;
node = of_find_compatible_node ( dev - > of_node , NULL ,
" qcom,gpu-pwrlevels " ) ;
if ( ! node ) {
dev_err ( dev , " Could not find the GPU powerlevels \n " ) ;
return - ENXIO ;
}
for_each_child_of_node ( node , child ) {
unsigned int val ;
ret = of_property_read_u32 ( child , " qcom,gpu-freq " , & val ) ;
if ( ret )
continue ;
/*
* Skip the intentionally bogus clock value found at the bottom
* of most legacy frequency tables
*/
if ( val ! = 27000000 )
dev_pm_opp_add ( dev , val , 0 ) ;
}
return 0 ;
}
static int adreno_get_pwrlevels ( struct device * dev ,
struct msm_gpu * gpu )
{
unsigned long freq = ULONG_MAX ;
struct dev_pm_opp * opp ;
int ret ;
gpu - > fast_rate = 0 ;
/* You down with OPP? */
if ( ! of_find_property ( dev - > of_node , " operating-points-v2 " , NULL ) )
ret = adreno_get_legacy_pwrlevels ( dev ) ;
else {
ret = dev_pm_opp_of_add_table ( dev ) ;
if ( ret )
dev_err ( dev , " Unable to set the OPP table \n " ) ;
}
if ( ! ret ) {
/* Find the fastest defined rate */
opp = dev_pm_opp_find_freq_floor ( dev , & freq ) ;
if ( ! IS_ERR ( opp ) ) {
gpu - > fast_rate = freq ;
dev_pm_opp_put ( opp ) ;
}
}
if ( ! gpu - > fast_rate ) {
dev_warn ( dev ,
" Could not find a clock rate. Using a reasonable default \n " ) ;
/* Pick a suitably safe clock speed for any target */
gpu - > fast_rate = 200000000 ;
}
DBG ( " fast_rate=%u, slow_rate=27000000 " , gpu - > fast_rate ) ;
return 0 ;
}
2013-07-19 12:59:32 -04:00
int adreno_gpu_init ( struct drm_device * drm , struct platform_device * pdev ,
2017-10-20 11:06:57 -06:00
struct adreno_gpu * adreno_gpu ,
const struct adreno_gpu_funcs * funcs , int nr_rings )
2013-07-19 12:59:32 -04:00
{
2014-09-05 15:03:40 -04:00
struct adreno_platform_config * config = pdev - > dev . platform_data ;
2017-05-08 14:35:03 -06:00
struct msm_gpu_config adreno_gpu_config = { 0 } ;
2014-09-05 15:03:40 -04:00
struct msm_gpu * gpu = & adreno_gpu - > base ;
2013-07-19 12:59:32 -04:00
2014-09-05 15:03:40 -04:00
adreno_gpu - > funcs = funcs ;
adreno_gpu - > info = adreno_info ( config - > rev ) ;
adreno_gpu - > gmem = adreno_gpu - > info - > gmem ;
adreno_gpu - > revn = adreno_gpu - > info - > revn ;
adreno_gpu - > rev = config - > rev ;
2017-05-08 14:35:03 -06:00
adreno_gpu_config . ioname = " kgsl_3d0_reg_memory " ;
adreno_gpu_config . irqname = " kgsl_3d0_irq " ;
adreno_gpu_config . va_start = SZ_16M ;
adreno_gpu_config . va_end = 0xffffffff ;
2017-10-20 11:06:57 -06:00
adreno_gpu_config . nr_rings = nr_rings ;
2017-05-08 14:35:03 -06:00
2017-11-21 12:40:55 -07:00
adreno_get_pwrlevels ( & pdev - > dev , gpu ) ;
2018-05-07 16:47:50 -06:00
pm_runtime_set_autosuspend_delay ( & pdev - > dev ,
adreno_gpu - > info - > inactive_period ) ;
2017-07-27 10:42:39 -06:00
pm_runtime_use_autosuspend ( & pdev - > dev ) ;
pm_runtime_enable ( & pdev - > dev ) ;
2017-10-20 11:06:56 -06:00
return msm_gpu_init ( drm , pdev , & adreno_gpu - > base , & funcs - > base ,
2017-05-08 14:35:03 -06:00
adreno_gpu - > info - > name , & adreno_gpu_config ) ;
2013-07-19 12:59:32 -04:00
}
2017-02-06 10:39:29 -07:00
void adreno_gpu_cleanup ( struct adreno_gpu * adreno_gpu )
2013-07-19 12:59:32 -04:00
{
2018-02-01 12:15:16 -07:00
unsigned int i ;
for ( i = 0 ; i < ARRAY_SIZE ( adreno_gpu - > info - > fw ) ; i + + )
release_firmware ( adreno_gpu - > fw [ i ] ) ;
2016-05-26 16:24:35 -04:00
2017-10-20 11:06:56 -06:00
msm_gpu_cleanup ( & adreno_gpu - > base ) ;
2013-07-19 12:59:32 -04:00
}